nova-13.1.4/0000775000567000056710000000000013064447471013716 5ustar jenkinsjenkins00000000000000nova-13.1.4/tox.ini0000664000567000056710000001163313064447152015231 0ustar jenkinsjenkins00000000000000[tox] minversion = 2.0 envlist = py34,py27,functional,pep8,pip-missing-reqs skipsdist = True [testenv] usedevelop = True # tox is silly... these need to be separated by a newline.... whitelist_externals = bash find rm install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/mitaka} {opts} {packages} setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=./nova/tests/unit LANGUAGE=en_US LC_ALL=en_US.utf-8 deps = -r{toxinidir}/test-requirements.txt commands = find . -type f -name "*.pyc" -delete bash tools/pretty_tox.sh '{posargs}' passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY # there is also secret magic in pretty_tox.sh which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. [testenv:pep8] basepython = python2.7 deps = hacking commands = bash tools/flake8wrap.sh {posargs} # Check that .po and .pot files are valid. bash -c "find nova -type f -regex '.*\.pot?' -print0| \ xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:py34] # NOTE(mriedem): If py34 fails with "db type could not be determined", delete # .testrepository and try again. Running py34 before py27 is OK, but not the # other way around. See: https://bugs.launchpad.net/testrepository/+bug/1212909 setenv = {[testenv]setenv} commands = find . -type f -name "*.pyc" -delete ostestr --blacklist_file tests-py3.txt [testenv:functional] # TODO(melwitt): This can be removed when functional tests can be run # with python 3.x basepython = python2.7 usedevelop = True setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=./nova/tests/functional LANGUAGE=en_US commands = find . -type f -name "*.pyc" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:api-samples] usedevelop = True setenv = VIRTUAL_ENV={envdir} GENERATE_SAMPLES=True PYTHONHASHSEED=0 OS_TEST_PATH=./nova/tests/functional/api_sample_tests LANGUAGE=en_US commands = find . -type f -name "*.pyc" -delete bash tools/pretty_tox.sh '{posargs}' [testenv:genconfig] commands = oslo-config-generator --config-file=etc/nova/nova-config-generator.conf [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those # tests conflict with coverage. commands = coverage erase python setup.py testr --coverage \ --testr-args='{posargs}' coverage combine coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i [testenv:venv] commands = {posargs} [testenv:docs] commands = rm -rf doc/source/api doc/build api-guide/build python setup.py build_sphinx bash -c '! find doc/ -type f -name *.json | xargs -t -n1 python -m json.tool 2>&1 > /dev/null | grep -B1 -v ^python' oslo-config-generator --config-file=etc/nova/nova-config-generator.conf sphinx-build -b html api-guide/source api-guide/build/html [testenv:api-guide] # This environment is called from CI scripts to test and publish # the API Guide to developer.openstack.org. # NOTE(sdague): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. 
install_command = pip install -U --force-reinstall {opts} {packages} commands = sphinx-build -b html -d api-guide/build/doctrees api-guide/source api-guide/build/html [testenv:bandit] commands = bandit -c bandit.yaml -r nova -n 5 -ll [testenv:releasenotes] # NOTE(sdague): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [flake8] # E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126 # The rest of the ignores are TODOs # New from hacking 0.9: E129, E131, H407, H405 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405 exclude = .venv,.git,.tox,dist,doc,*openstack/common/*,*lib/python*,*egg,build,tools/xenserver*,releasenotes # To get a list of functions that are more complex than 25, set max-complexity # to 25 and run 'tox -epep8'. # 34 is currently the most complex thing we have # TODO(jogo): get this number down to 25 or so max-complexity=35 [hacking] local-check-factory = nova.hacking.checks.factory import_exceptions = nova.i18n [testenv:pip-missing-reqs] # do not install test-requirements as that will pollute the virtualenv for # determining missing packages # this also means that pip-missing-reqs must be installed separately, outside # of the requirements.txt files deps = pip_missing_reqs commands=pip-missing-reqs -d --ignore-file=nova/tests/* --ignore-file=nova/test.py nova nova-13.1.4/setup.cfg0000664000567000056710000002552213064447471015545 0ustar jenkinsjenkins00000000000000[metadata] name = nova summary = Cloud computing fabric controller description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/nova/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [global] setup-hooks = pbr.hooks.setup_hook [files] packages = nova [entry_points] oslo.config.opts = nova = nova.opts:list_opts nova.conf = nova.conf.opts:list_opts nova.api = nova.api.opts:list_opts nova.cells = nova.cells.opts:list_opts nova.compute = nova.compute.opts:list_opts nova.network = nova.network.opts:list_opts nova.network.neutronv2 = nova.network.neutronv2.api:list_opts nova.virt = nova.virt.opts:list_opts nova.cache_utils = nova.cache_utils:list_opts oslo.config.opts.defaults = nova.api = nova.common.config:set_middleware_defaults nova.compute.monitors.cpu = virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor nova.compute.resources = nova.image.download.modules = file = nova.image.download.file console_scripts = nova-all = nova.cmd.all:main nova-api = nova.cmd.api:main nova-api-metadata = nova.cmd.api_metadata:main nova-api-os-compute = nova.cmd.api_os_compute:main nova-cells = nova.cmd.cells:main nova-cert = nova.cmd.cert:main nova-compute = nova.cmd.compute:main nova-conductor = nova.cmd.conductor:main nova-console = nova.cmd.console:main nova-consoleauth = nova.cmd.consoleauth:main nova-dhcpbridge = nova.cmd.dhcpbridge:main nova-idmapshift = 
nova.cmd.idmapshift:main nova-manage = nova.cmd.manage:main nova-network = nova.cmd.network:main nova-novncproxy = nova.cmd.novncproxy:main nova-rootwrap = oslo_rootwrap.cmd:main nova-rootwrap-daemon = oslo_rootwrap.cmd:daemon nova-scheduler = nova.cmd.scheduler:main nova-serialproxy = nova.cmd.serialproxy:main nova-spicehtml5proxy = nova.cmd.spicehtml5proxy:main nova-xvpvncproxy = nova.cmd.xvpvncproxy:main nova.api.v21.extensions = access_ips = nova.api.openstack.compute.access_ips:AccessIPs admin_actions = nova.api.openstack.compute.admin_actions:AdminActions admin_password = nova.api.openstack.compute.admin_password:AdminPassword agents = nova.api.openstack.compute.agents:Agents aggregates = nova.api.openstack.compute.aggregates:Aggregates assisted_volume_snapshots = nova.api.openstack.compute.assisted_volume_snapshots:AssistedVolumeSnapshots attach_interfaces = nova.api.openstack.compute.attach_interfaces:AttachInterfaces availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone baremetal_nodes = nova.api.openstack.compute.baremetal_nodes:BareMetalNodes block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping cells = nova.api.openstack.compute.cells:Cells certificates = nova.api.openstack.compute.certificates:Certificates cloudpipe = nova.api.openstack.compute.cloudpipe:Cloudpipe config_drive = nova.api.openstack.compute.config_drive:ConfigDrive console_auth_tokens = nova.api.openstack.compute.console_auth_tokens:ConsoleAuthTokens console_output = nova.api.openstack.compute.console_output:ConsoleOutput consoles = nova.api.openstack.compute.consoles:Consoles create_backup = nova.api.openstack.compute.create_backup:CreateBackup deferred_delete = nova.api.openstack.compute.deferred_delete:DeferredDelete disk_config = nova.api.openstack.compute.disk_config:DiskConfig evacuate = nova.api.openstack.compute.evacuate:Evacuate extended_availability_zone = nova.api.openstack.compute.extended_availability_zone:ExtendedAvailabilityZone extended_server_attributes = nova.api.openstack.compute.extended_server_attributes:ExtendedServerAttributes extended_status = nova.api.openstack.compute.extended_status:ExtendedStatus extended_volumes = nova.api.openstack.compute.extended_volumes:ExtendedVolumes extension_info = nova.api.openstack.compute.extension_info:ExtensionInfo fixed_ips = nova.api.openstack.compute.fixed_ips:FixedIps flavors = nova.api.openstack.compute.flavors:Flavors flavors_extraspecs = nova.api.openstack.compute.flavors_extraspecs:FlavorsExtraSpecs flavor_access = nova.api.openstack.compute.flavor_access:FlavorAccess flavor_rxtx = nova.api.openstack.compute.flavor_rxtx:FlavorRxtx flavor_manage = nova.api.openstack.compute.flavor_manage:FlavorManage floating_ip_dns = nova.api.openstack.compute.floating_ip_dns:FloatingIpDns floating_ip_pools = nova.api.openstack.compute.floating_ip_pools:FloatingIpPools floating_ips = nova.api.openstack.compute.floating_ips:FloatingIps floating_ips_bulk = nova.api.openstack.compute.floating_ips_bulk:FloatingIpsBulk fping = nova.api.openstack.compute.fping:Fping hide_server_addresses = nova.api.openstack.compute.hide_server_addresses:HideServerAddresses hosts = nova.api.openstack.compute.hosts:Hosts hypervisors = nova.api.openstack.compute.hypervisors:Hypervisors images = nova.api.openstack.compute.images:Images image_metadata = nova.api.openstack.compute.image_metadata:ImageMetadata image_size = nova.api.openstack.compute.image_size:ImageSize instance_actions = 
nova.api.openstack.compute.instance_actions:InstanceActions instance_usage_audit_log = nova.api.openstack.compute.instance_usage_audit_log:InstanceUsageAuditLog ips = nova.api.openstack.compute.ips:IPs keypairs = nova.api.openstack.compute.keypairs:Keypairs limits = nova.api.openstack.compute.limits:Limits lock_server = nova.api.openstack.compute.lock_server:LockServer migrate_server = nova.api.openstack.compute.migrate_server:MigrateServer migrations = nova.api.openstack.compute.migrations:Migrations multinic = nova.api.openstack.compute.multinic:Multinic multiple_create = nova.api.openstack.compute.multiple_create:MultipleCreate networks = nova.api.openstack.compute.networks:Networks networks_associate = nova.api.openstack.compute.networks_associate:NetworksAssociate pause_server = nova.api.openstack.compute.pause_server:PauseServer personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild quota_classes = nova.api.openstack.compute.quota_classes:QuotaClasses quota_sets = nova.api.openstack.compute.quota_sets:QuotaSets remote_consoles = nova.api.openstack.compute.remote_consoles:RemoteConsoles rescue = nova.api.openstack.compute.rescue:Rescue scheduler_hints = nova.api.openstack.compute.scheduler_hints:SchedulerHints security_group_default_rules = nova.api.openstack.compute.security_group_default_rules:SecurityGroupDefaultRules security_groups = nova.api.openstack.compute.security_groups:SecurityGroups server_diagnostics = nova.api.openstack.compute.server_diagnostics:ServerDiagnostics server_external_events = nova.api.openstack.compute.server_external_events:ServerExternalEvents server_metadata = nova.api.openstack.compute.server_metadata:ServerMetadata server_migrations = nova.api.openstack.compute.server_migrations:ServerMigrations server_password = nova.api.openstack.compute.server_password:ServerPassword server_usage = nova.api.openstack.compute.server_usage:ServerUsage server_groups = nova.api.openstack.compute.server_groups:ServerGroups servers = nova.api.openstack.compute.servers:Servers services = nova.api.openstack.compute.services:Services shelve = nova.api.openstack.compute.shelve:Shelve simple_tenant_usage = nova.api.openstack.compute.simple_tenant_usage:SimpleTenantUsage suspend_server = nova.api.openstack.compute.suspend_server:SuspendServer tenant_networks = nova.api.openstack.compute.tenant_networks:TenantNetworks used_limits = nova.api.openstack.compute.used_limits:UsedLimits user_data = nova.api.openstack.compute.user_data:UserData versions = nova.api.openstack.compute.versionsV21:Versions virtual_interfaces = nova.api.openstack.compute.virtual_interfaces:VirtualInterfaces volumes = nova.api.openstack.compute.volumes:Volumes nova.api.v21.extensions.server.create = access_ips = nova.api.openstack.compute.access_ips:AccessIPs availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping block_device_mapping_v1 = nova.api.openstack.compute.block_device_mapping_v1:BlockDeviceMappingV1 config_drive = nova.api.openstack.compute.config_drive:ConfigDrive disk_config = nova.api.openstack.compute.disk_config:DiskConfig keypairs_create = nova.api.openstack.compute.keypairs:Keypairs multiple_create = nova.api.openstack.compute.multiple_create:MultipleCreate personality = nova.api.openstack.compute.personality:Personality scheduler_hints = 
nova.api.openstack.compute.scheduler_hints:SchedulerHints security_groups = nova.api.openstack.compute.security_groups:SecurityGroups user_data = nova.api.openstack.compute.user_data:UserData nova.api.v21.extensions.server.rebuild = access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild nova.api.v21.extensions.server.update = access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig nova.api.v21.extensions.server.resize = disk_config = nova.api.openstack.compute.disk_config:DiskConfig nova.api.v21.test_extensions = basic = nova.tests.unit.api.openstack.compute.basic:Basic microversions = nova.tests.unit.api.openstack.compute.microversions:Microversions nova.ipv6_backend = rfc2462 = nova.ipv6.rfc2462 account_identifier = nova.ipv6.account_identifier nova.scheduler.host_manager = host_manager = nova.scheduler.host_manager:HostManager ironic_host_manager = nova.scheduler.ironic_host_manager:IronicHostManager nova.scheduler.driver = filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler caching_scheduler = nova.scheduler.caching_scheduler:CachingScheduler chance_scheduler = nova.scheduler.chance:ChanceScheduler fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [build_apiguide] all_files = 1 build-dir = api-guide/build source-dir = api-guide/source [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [compile_catalog] directory = nova/locale domain = nova [update_catalog] domain = nova output_dir = nova/locale input_file = nova/locale/nova.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = nova/locale/nova.pot [wheel] universal = 1 [pbr] autodoc_index_modules = True autodoc_exclude_modules = nova.wsgi.nova-* nova.tests.* warnerrors = true nova-13.1.4/run_tests.sh0000775000567000056710000000152513064447152016302 0ustar jenkinsjenkins00000000000000#!/bin/bash set -eu cat < 5: priority = 5 item['classes'].append('todo_p' + str(priority)) todo_entry['classes'].append('todo_p' + str(priority)) item.append(comment) lists[priority - 1].insert(0, item) node.replace_self(lists) def setup(app): app.add_config_value('todo_include_todos', False, False) app.add_node(todolist) app.add_node(todo_node, html=(visit_todo_node, depart_todo_node), latex=(visit_todo_node, depart_todo_node), text=(visit_todo_node, depart_todo_node)) app.add_directive('todo', Todo) app.add_directive('todolist', TodoList) app.connect('doctree-read', process_todos) app.connect('doctree-resolved', process_todo_nodes) app.connect('env-purge-doc', purge_todos) nova-13.1.4/doc/ext/__init__.py0000664000567000056710000000000013064447140017353 0ustar jenkinsjenkins00000000000000nova-13.1.4/doc/ext/versioned_notifications.py0000664000567000056710000000662113064447152022565 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to list the implemented versioned notifications into the developer documentation. It is used via a single directive in the .rst file .. versioned_notifications:: """ from sphinx.util.compat import Directive from docutils import nodes from nova.objects import base from nova.objects import notification def full_name(cls): return cls.__module__ + '.' + cls.__name__ class VersionedNotificationDirective(Directive): LINK_PREFIX = 'https://git.openstack.org/cgit/openstack/nova/plain/' SAMPLE_ROOT = 'doc/notification_samples/' def run(self): notifications = self._collect_notifications() return self._build_markup(notifications) def _collect_notifications(self): notifications = [] ovos = base.NovaObjectRegistry.obj_classes() for name, cls in ovos.items(): cls = cls[0] if (issubclass(cls, notification.NotificationBase) and cls != notification.NotificationBase): payload_name = cls.fields['payload'].objname payload_cls = ovos[payload_name][0] notifications.append((full_name(cls), full_name(payload_cls), cls.sample)) return notifications def _build_markup(self, notifications): content = [] cols = ['Notification class', 'Payload class', 'Sample file link'] table = nodes.table() content.append(table) group = nodes.tgroup(cols=len(cols)) table.append(group) head = nodes.thead() group.append(head) for i in range(len(cols)): group.append(nodes.colspec(colwidth=1)) body = nodes.tbody() group.append(body) # fill the table header row = nodes.row() body.append(row) for col_name in cols: col = nodes.entry() row.append(col) text = nodes.strong(text=col_name) col.append(text) # fill the table content, one notification per row for name, payload, sample in notifications: row = nodes.row() body.append(row) col = nodes.entry() row.append(col) text = nodes.literal(text=name) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=payload) col.append(text) col = nodes.entry() row.append(col) ref = nodes.reference(refuri=self.LINK_PREFIX + self.SAMPLE_ROOT + sample) txt = nodes.inline() col.append(txt) txt.append(ref) ref.append(nodes.literal(text=sample)) return content def setup(app): app.add_directive('versioned_notifications', VersionedNotificationDirective) nova-13.1.4/doc/ext/support_matrix.py0000664000567000056710000004446313064447152020744 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to render the source/support-matrix.ini file into the developer documentation. It is used via a single directive in the .rst file .. 
support_matrix:: """ import re import six from six.moves import configparser from docutils import nodes from docutils.parsers import rst class SupportMatrix(object): """Represents the entire support matrix for Nova virt drivers """ def __init__(self): # List of SupportMatrixFeature instances, describing # all the features present in Nova virt drivers self.features = [] # Dict of (name, SupportMatrixTarget) enumerating # all the hypervisor drivers that have data recorded # for them in self.features. The 'name' dict key is # the value from the SupportMatrixTarget.key attribute self.targets = {} class SupportMatrixFeature(object): STATUS_MANDATORY = "mandatory" STATUS_CHOICE = "choice" STATUS_CONDITION = "condition" STATUS_OPTIONAL = "optional" STATUS_ALL = [STATUS_MANDATORY, STATUS_CHOICE, STATUS_CONDITION, STATUS_OPTIONAL] def __init__(self, key, title, status=STATUS_OPTIONAL, group=None, notes=None, cli=[]): # A unique key (eg 'foo.bar.wizz') to identify the feature self.key = key # A human friendly short title for the feature self.title = title # One of the status constants self.status = status # Detail string if status was choice/condition self.group = group # Arbitrarily long string describing the feature in detail self.notes = notes # Dict of (name, SupportMatrixImplementation) detailing # the implementation for each hypervisor driver. The # 'name' dict key is the value from SupportMatrixTarget.key # for the hypervisor in question self.implementations = {} # A list of CLI commands which are related to that feature self.cli = cli class SupportMatrixImplementation(object): STATUS_COMPLETE = "complete" STATUS_PARTIAL = "partial" STATUS_MISSING = "missing" STATUS_UKNOWN = "unknown" STATUS_ALL = [STATUS_COMPLETE, STATUS_PARTIAL, STATUS_MISSING, STATUS_UKNOWN] def __init__(self, status=STATUS_MISSING, notes=None): # One of the status constants detailing the implementation # level self.status = status # Arbitrary string describing any caveats of the implementation. # Mandatory if status is 'partial', optional otherwise. self.notes = notes class SupportMatrixTarget(object): def __init__(self, key, title, driver, hypervisor=None, architecture=None): """:param key: Unique identifier for the hypervisor driver :param title: Human friendly name of the hypervisor :param driver: Name of the Nova driver :param hypervisor: (optional) Name of the hypervisor, if many :param architecture: (optional) Name of the architecture, if many """ self.key = key self.title = title self.driver = driver self.hypervisor = hypervisor self.architecture = architecture class SupportMatrixDirective(rst.Directive): # The argument is the filename, e.g. support-matrix.ini required_arguments = 1 def run(self): matrix = self._load_support_matrix() return self._build_markup(matrix) def _load_support_matrix(self): """Reads the support-matrix.ini file and populates an instance of the SupportMatrix class with all the data. 
:returns: SupportMatrix instance """ cfg = configparser.SafeConfigParser() env = self.state.document.settings.env fname = self.arguments[0] rel_fpath, fpath = env.relfn2path(fname) with open(fpath) as fp: cfg.readfp(fp) # This ensures that the docs are rebuilt whenever the # .ini file changes env.note_dependency(rel_fpath) matrix = SupportMatrix() matrix.targets = self._get_targets(cfg) matrix.features = self._get_features(cfg, matrix.targets) return matrix def _get_targets(self, cfg): # The 'targets' section is special - it lists all the # hypervisors that this file records data for targets = {} for item in cfg.options("targets"): if not item.startswith("driver-impl-"): continue # The driver string will optionally contain # a hypervisor and architecture qualifier # so we expect between 1 and 3 components # in the name key = item[12:] title = cfg.get("targets", item) name = key.split("-") if len(name) == 1: target = SupportMatrixTarget(key, title, name[0]) elif len(name) == 2: target = SupportMatrixTarget(key, title, name[0], name[1]) elif len(name) == 3: target = SupportMatrixTarget(key, title, name[0], name[1], name[2]) else: raise Exception("'%s' field is malformed in '[%s]' section" % (item, "DEFAULT")) targets[key] = target return targets def _get_features(self, cfg, targets): # All sections except 'targets' describe some feature of # the Nova hypervisor driver implementation features = [] for section in cfg.sections(): if section == "targets": continue if not cfg.has_option(section, "title"): raise Exception( "'title' field missing in '[%s]' section" % section) title = cfg.get(section, "title") status = SupportMatrixFeature.STATUS_OPTIONAL if cfg.has_option(section, "status"): # The value is a string "status(group)" where # the 'group' part is optional status = cfg.get(section, "status") offset = status.find("(") group = None if offset != -1: group = status[offset + 1:-1] status = status[0:offset] if status not in SupportMatrixFeature.STATUS_ALL: raise Exception( "'status' field value '%s' in ['%s']" "section must be %s" % (status, section, ",".join(SupportMatrixFeature.STATUS_ALL))) notes = None if cfg.has_option(section, "notes"): notes = cfg.get(section, "notes") cli = [] if cfg.has_option(section, "cli"): cli = cfg.get(section, "cli") feature = SupportMatrixFeature(section, title, status, group, notes, cli) # Now we've got the basic feature details, we must process # the hypervisor driver implementation for each feature for item in cfg.options(section): if not item.startswith("driver-impl-"): continue key = item[12:] if key not in targets: raise Exception( "Driver impl '%s' in '[%s]' not declared" % (item, section)) status = cfg.get(section, item) if status not in SupportMatrixImplementation.STATUS_ALL: raise Exception( "'%s' value '%s' in '[%s]' section must be %s" % (item, status, section, ",".join(SupportMatrixImplementation.STATUS_ALL))) noteskey = "driver-notes-" + item[12:] notes = None if cfg.has_option(section, noteskey): notes = cfg.get(section, noteskey) target = targets[key] impl = SupportMatrixImplementation(status, notes) feature.implementations[target.key] = impl for key in targets: if key not in feature.implementations: raise Exception("'%s' missing in '[%s]' section" % (target.key, section)) features.append(feature) return features def _build_markup(self, matrix): """Constructs the docutils content for the support matrix """ content = [] self._build_summary(matrix, content) self._build_details(matrix, content) self._build_notes(content) return content def 
_build_summary(self, matrix, content): """Constructs the docutils content for the summary of the support matrix. The summary consists of a giant table, with one row for each feature, and a column for each hypervisor driver. It provides an 'at a glance' summary of the status of each driver """ summarytitle = nodes.subtitle(text="Summary") summary = nodes.table() cols = len(matrix.targets.keys()) cols += 2 summarygroup = nodes.tgroup(cols=cols) summarybody = nodes.tbody() summaryhead = nodes.thead() for i in range(cols): summarygroup.append(nodes.colspec(colwidth=1)) summarygroup.append(summaryhead) summarygroup.append(summarybody) summary.append(summarygroup) content.append(summarytitle) content.append(summary) # This sets up all the column headers - two fixed # columns for feature name & status header = nodes.row() blank = nodes.entry() blank.append(nodes.emphasis(text="Feature")) header.append(blank) blank = nodes.entry() blank.append(nodes.emphasis(text="Status")) header.append(blank) summaryhead.append(header) # then one column for each hypervisor driver impls = matrix.targets.keys() impls.sort() for key in impls: target = matrix.targets[key] implcol = nodes.entry() header.append(implcol) implcol.append(nodes.strong(text=target.title)) # We now produce the body of the table, one row for # each feature to report on for feature in matrix.features: item = nodes.row() # the hyperlink target name linking to details id = re.sub("[^a-zA-Z0-9_]", "_", feature.key) # first the to fixed columns for title/status keycol = nodes.entry() item.append(keycol) keyref = nodes.reference(refid=id) keytxt = nodes.inline() keycol.append(keytxt) keytxt.append(keyref) keyref.append(nodes.strong(text=feature.title)) statuscol = nodes.entry() item.append(statuscol) statuscol.append(nodes.inline( text=feature.status, classes=["sp_feature_" + feature.status])) # and then one column for each hypervisor driver impls = matrix.targets.keys() impls.sort() for key in impls: target = matrix.targets[key] impl = feature.implementations[key] implcol = nodes.entry() item.append(implcol) id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key) implref = nodes.reference(refid=id) impltxt = nodes.inline() implcol.append(impltxt) impltxt.append(implref) status = "" if impl.status == SupportMatrixImplementation.STATUS_COMPLETE: status = u"\u2714" elif impl.status == SupportMatrixImplementation.STATUS_MISSING: status = u"\u2716" elif impl.status == SupportMatrixImplementation.STATUS_PARTIAL: status = u"\u2714" elif impl.status == SupportMatrixImplementation.STATUS_UKNOWN: status = u"?" implref.append(nodes.literal( text=status, classes=["sp_impl_summary", "sp_impl_" + impl.status])) summarybody.append(item) def _build_details(self, matrix, content): """Constructs the docutils content for the details of the support matrix. This is generated as a bullet list of features. 
Against each feature we provide the description of the feature and then the details of the hypervisor impls, with any driver specific notes that exist """ detailstitle = nodes.subtitle(text="Details") details = nodes.bullet_list() content.append(detailstitle) content.append(details) # One list entry for each feature we're reporting on for feature in matrix.features: item = nodes.list_item() status = feature.status if feature.group is not None: status += "(" + feature.group + ")" # The hypervisor target name linked from summary table id = re.sub("[^a-zA-Z0-9_]", "_", feature.key) # Highlight the feature title name item.append(nodes.strong(text=feature.title, ids=[id])) para = nodes.paragraph() para.append(nodes.strong(text="Status: " + status + ". ")) if feature.notes is not None: para.append(nodes.inline(text=feature.notes)) item.append(para) if feature.cli: item.append(self._create_cli_paragraph(feature)) para_divers = nodes.paragraph() para_divers.append(nodes.strong(text="drivers:")) # A sub-list giving details of each hypervisor target impls = nodes.bullet_list() for key in feature.implementations: target = matrix.targets[key] impl = feature.implementations[key] subitem = nodes.list_item() id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key) subitem += [ nodes.strong(text=target.title + ": "), nodes.literal(text=impl.status, classes=["sp_impl_" + impl.status], ids=[id]), ] if impl.notes is not None: subitem.append(self._create_notes_paragraph(impl.notes)) impls.append(subitem) para_divers.append(impls) item.append(para_divers) details.append(item) def _build_notes(self, content): """Constructs a list of notes content for the support matrix. This is generated as a bullet list. """ notestitle = nodes.subtitle(text="Notes") notes = nodes.bullet_list() content.append(notestitle) content.append(notes) NOTES = [ "Virtuozzo was formerly named Parallels in this document" ] for note in NOTES: item = nodes.list_item() item.append(nodes.strong(text=note)) notes.append(item) def _create_cli_paragraph(self, feature): ''' Create a paragraph which represents the CLI commands of the feature The paragraph will have a bullet list of CLI commands. ''' para = nodes.paragraph() para.append(nodes.strong(text="CLI commands:")) commands = nodes.bullet_list() for c in feature.cli.split(";"): cli_command = nodes.list_item() cli_command += nodes.literal(text=c, classes=["sp_cli"]) commands.append(cli_command) para.append(commands) return para def _create_notes_paragraph(self, notes): """ Constructs a paragraph which represents the implementation notes The paragraph consists of text and clickable URL nodes if links were given in the notes. 
""" para = nodes.paragraph() # links could start with http:// or https:// link_idxs = [m.start() for m in re.finditer('https?://', notes)] start_idx = 0 for link_idx in link_idxs: # assume the notes start with text (could be empty) para.append(nodes.inline(text=notes[start_idx:link_idx])) # create a URL node until the next text or the end of the notes link_end_idx = notes.find(" ", link_idx) if link_end_idx == -1: # In case the notes end with a link without a blank link_end_idx = len(notes) uri = notes[link_idx:link_end_idx + 1] para.append(nodes.reference("", uri, refuri=uri)) start_idx = link_end_idx + 1 # get all text after the last link (could be empty) or all of the # text if no link was given para.append(nodes.inline(text=notes[start_idx:])) return para def setup(app): app.add_directive('support_matrix', SupportMatrixDirective) app.add_stylesheet('support-matrix.css') nova-13.1.4/doc/notification_samples/0000775000567000056710000000000013064447471020675 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/notification_samples/service-update.json0000664000567000056710000000114113064447152024501 0ustar jenkinsjenkins00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.name": "ServiceStatusPayload", "nova_object.version": "1.0", "nova_object.data": { "host": "host1", "disabled": false, "last_seen_up": "2012-10-29T13:42:05Z", "binary": "nova-compute", "topic": "compute", "disabled_reason": null, "report_count": 1, "forced_down": false, "version": 9 } }, "event_type": "service.update", "publisher_id": "nova-compute:host1" } nova-13.1.4/doc/source/0000775000567000056710000000000013064447471015763 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/feature_classification.rst0000664000567000056710000001506213064447152023223 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================== Feature Classification ====================== This document aims to define how we describe features listed in the :doc:`support-matrix`. Aims ==== Our users want the features they rely on to be reliable and always continue to solve for their use case. When things break, users request that we solve their issues quickly. It would be better if we never had those regressions in the first place. We are taking a two-pronged approach: * Tell our users what features are complete, well-documented, and are kept stable by good tests. They will get a good experience if they stick to using those features. Please note that the tests are specific to particular combinations of technologies. A deployment's choice of storage, networking and hypervisor makes a big difference to what features will work. * Get help for the features that are not in the above state, and warn our users about the risks of using those features before they are ready. It should make it much clearer how to help improve the feature. Concepts ======== Some definitions to help understand the later part of the document. 
Users ----- These are the users we will talk about in this document: * application deployer: creates/deletes servers, directly or indirect via API * application developer: creates images and apps that run on the cloud * cloud operator: administers the cloud * self service administrator: both runs and uses the cloud Now in reality the picture is way more complex. Specifically, there are likely to be different roles for observer, creator and admin roles for the application developer. Similarly, there are likely to be various levels of cloud operator permissions, some read only, see a subset of tenants, etc. Note: this is not attempting to be an exhaustive set of personas that consider various facets of the different users, but instead aims to be a minimal set of users, such that we use a consistent terminology throughout this document. Feature Group ------------- To reduce the size of the matrix, we organize the features into groups. Each group maps to a set of user stories, that can be validated by a set of scenarios, tests. Typically, this means a set of tempest tests. This list focuses on API concepts like attach and detach volumes, rather than deployment specific concepts like attach iSCSI volume to KVM based VM. Deployment ---------- A deployment maps to a specific test environment. A full description of the environment should be provided, so its possible to reproduce the test results that are reported for each of the Feature Groups. Note: this description includes all aspects of the deployment: the hypervisor, the number of nova-compute services, the storage being used, the network driver being used, the types of images being tested, etc. Feature Group Maturity ----------------------- The Feature Group Maturity rating is specific to the API concepts, rather than specific to a particular deployment. That detail is covered in the deployment rating for each feature group. We are starting out these Feature Group ratings: * Incomplete * Experimental * Complete * Complete and Required * Deprecated (scheduled to be removed in a future release) Incomplete features are those that don't have enough functionality to satisfy real world use cases. Experimental features should be used with extreme caution. They are likely to have little or no upstream testing. With little testing there are likely to be many unknown bugs. For a feature to be considered complete, we must have: * Complete API docs (concept and REST call definition) * Complete Administrator docs * Tempest tests that define if the feature works correctly * Has enough functionality, and works reliably enough to be useful in real world scenarios * Unlikely to ever have a reason to drop support for the feature There are various reasons why a feature, once complete, becomes required, but currently its largely when a feature is supported by all drivers. Note that any new drivers need to prove they support all required features before it would be allowed in upstream Nova. Please note that this list is technically unrelated to the DefCore effort, despite there being obvious parallels that could be drawn. Required features are those that any new technology must support before being allowed into tree. The larger the list, the more features can be expected to be available on all Nova based clouds. Deprecated features are those that are scheduled to be removed in a future major release of Nova. If a feature is marked as complete, it should never be deprecated. 
If a feature is incomplete or experimental for several releases, it runs the risk of being deprecated, and later removed from the code base. Deployment Rating for a Feature Group -------------------------------------- The deployment rating is purely about the state of the tests for each Feature Group on a particular deployment. There will the following ratings: * unknown * not implemented * implemented: self declare the tempest tests pass * regularly tested: tested by third party CI * checked: Tested as part of the check or gate queue The eventual goal is to automate this list from some third party CI reporting system, but so we can make progress, this will be a manual inspection that is documented by an hand written ini file. Ideally, this will be reviewed every milestone. Feature Group Definitions ========================= This is a look at features targeted at application developers, and the current state of each feature, independent of the specific deployment. Please note: this is still a work in progress! Key TODOs: * use new API docs as a template for the feature groups, into ini file * add lists of tempest UUIDs for each group * link from hypervisor support matrix into feature group maturity ratings * add maturity rating into the feature groups, with a justification, which is likely to include lints to API docs, etc * replace tick and cross in support matrix with "deployment ratings" * eventually generate the tick and cross from live, historical, CI results nova-13.1.4/doc/source/filter_scheduler.rst0000664000567000056710000006621313064447152022044 0ustar jenkinsjenkins00000000000000Filter Scheduler ================ The **Filter Scheduler** supports `filtering` and `weighting` to make informed decisions on where a new instance should be created. This Scheduler supports working with Compute Nodes only. Filtering --------- .. image:: ./images/filteringWorkflow1.png During its work Filter Scheduler iterates over all found compute nodes, evaluating each against a set of filters. The list of resulting hosts is ordered by weighers. The Scheduler then chooses hosts for the requested number of instances, choosing the most weighted hosts. For a specific filter to succeed for a specific host, the filter matches the user request against the state of the host plus some extra magic as defined by each filter (described in more detail below). If the Scheduler cannot find candidates for the next instance, it means that there are no appropriate hosts where that instance can be scheduled. The Filter Scheduler has to be quite flexible to support the required variety of `filtering` and `weighting` strategies. If this flexibility is insufficient you can implement `your own filtering algorithm`. There are many standard filter classes which may be used (:mod:`nova.scheduler.filters`): * |AllHostsFilter| - does no filtering. It passes all the available hosts. * |ImagePropertiesFilter| - filters hosts based on properties defined on the instance's image. It passes hosts that can support the properties specified on the image used by the instance. * |AvailabilityZoneFilter| - filters hosts by availability zone. It passes hosts matching the availability zone specified in the instance properties. Use a comma to specify multiple zones. The filter will then ensure it matches any zone specified. * |ComputeCapabilitiesFilter| - checks that the capabilities provided by the host compute service satisfy any extra specifications associated with the instance type. 
It passes hosts that can create the specified instance type. If an extra specs key contains a colon (:), anything before the colon is treated as a namespace and anything after the colon is treated as the key to be matched. If a namespace is present and is not ``capabilities``, the filter ignores the namespace. For example ``capabilities:cpu_info:features`` is a valid scope format. For backward compatibility, the filter also treats the extra specs key as the key to be matched if no namespace is present; this action is highly discouraged because it conflicts with AggregateInstanceExtraSpecsFilter filter when you enable both filters The extra specifications can have an operator at the beginning of the value string of a key/value pair. If there is no operator specified, then a default operator of ``s==`` is used. Valid operators are: :: * = (equal to or greater than as a number; same as vcpus case) * == (equal to as a number) * != (not equal to as a number) * >= (greater than or equal to as a number) * <= (less than or equal to as a number) * s== (equal to as a string) * s!= (not equal to as a string) * s>= (greater than or equal to as a string) * s> (greater than as a string) * s<= (less than or equal to as a string) * s< (less than as a string) * (substring) * (all elements contained in collection) * (find one of these) Examples are: ">= 5", "s== 2.1.0", " gcc", " aes mmx", and " fpu gpu" * |AggregateInstanceExtraSpecsFilter| - checks that the aggregate metadata satisfies any extra specifications associated with the instance type (that have no scope or are scoped with ``aggregate_instance_extra_specs``). It passes hosts that can create the specified instance type. The extra specifications can have the same operators as |ComputeCapabilitiesFilter|. To specify multiple values for the same key use a comma. E.g., "value1,value2" * |ComputeFilter| - passes all hosts that are operational and enabled. * |CoreFilter| - filters based on CPU core utilization. It passes hosts with sufficient number of CPU cores. * |AggregateCoreFilter| - filters hosts by CPU core number with per-aggregate ``cpu_allocation_ratio`` setting. If no per-aggregate value is found, it will fall back to the global default ``cpu_allocation_ratio``. If more than one value is found for a host (meaning the host is in two different aggregates with different ratio settings), the minimum value will be used. * |IsolatedHostsFilter| - filter based on ``image_isolated``, ``host_isolated`` and ``restrict_isolated_hosts_to_isolated_images`` flags. * |JsonFilter| - allows simple JSON-based grammar for selecting hosts. * |RamFilter| - filters hosts by their RAM. Only hosts with sufficient RAM to host the instance are passed. * |AggregateRamFilter| - filters hosts by RAM with per-aggregate ``ram_allocation_ratio`` setting. If no per-aggregate value is found, it will fall back to the global default ``ram_allocation_ratio``. If more than one value is found for a host (meaning the host is in two different aggregates with different ratio settings), the minimum value will be used. * |DiskFilter| - filters hosts by their disk allocation. Only hosts with sufficient disk space to host the instance are passed. ``disk_allocation_ratio`` setting. The virtual disk to physical disk allocation ratio, 1.0 by default. The total allowed allocated disk size will be physical disk multiplied this ratio. * |AggregateDiskFilter| - filters hosts by disk allocation with per-aggregate ``disk_allocation_ratio`` setting. 
If no per-aggregate value is found, it will fall back to the global default ``disk_allocation_ratio``. If more than one value is found for a host (meaning the host is in two or more different aggregates with different ratio settings), the minimum value will be used. * |NumInstancesFilter| - filters compute nodes by number of running instances. Nodes with too many instances will be filtered. ``max_instances_per_host`` setting. Maximum number of instances allowed to run on this host. The host will be ignored by the scheduler if more than ``max_instances_per_host`` already exist on the host. * |AggregateNumInstancesFilter| - filters hosts by number of instances with per-aggregate ``max_instances_per_host`` setting. If no per-aggregate value is found, it will fall back to the global default ``max_instances_per_host``. If more than one value is found for a host (meaning the host is in two or more different aggregates with different max instances per host settings), the minimum value will be used. * |IoOpsFilter| - filters hosts by concurrent I/O operations on it. hosts with too many concurrent I/O operations will be filtered. ``max_io_ops_per_host`` setting. Maximum number of I/O intensive instances allowed to run on this host, the host will be ignored by scheduler if more than ``max_io_ops_per_host`` instances such as build/resize/snapshot etc are running on it. * |AggregateIoOpsFilter| - filters hosts by I/O operations with per-aggregate ``max_io_ops_per_host`` setting. If no per-aggregate value is found, it will fall back to the global default ``max_io_ops_per_host``. If more than one value is found for a host (meaning the host is in two or more different aggregates with different max io operations settings), the minimum value will be used. * |PciPassthroughFilter| - Filter that schedules instances on a host if the host has devices to meet the device requests in the 'extra_specs' for the flavor. * |SimpleCIDRAffinityFilter| - allows a new instance on a host within the same IP block. * |DifferentHostFilter| - allows the instance on a different host from a set of instances. * |SameHostFilter| - puts the instance on the same host as another instance in a set of instances. * |RetryFilter| - filters hosts that have been attempted for scheduling. Only passes hosts that have not been previously attempted. * |TrustedFilter| (EXPERIMENTAL) - filters hosts based on their trust. Only passes hosts that meet the trust requirements specified in the instance properties. * |TypeAffinityFilter| - Only passes hosts that are not already running an instance of the requested type. * |AggregateTypeAffinityFilter| - limits instance_type by aggregate. This filter passes hosts if no instance_type key is set or the instance_type aggregate metadata value contains the name of the instance_type requested. The value of the instance_type metadata entry is a string that may contain either a single instance_type name or a comma separated list of instance_type names. e.g. 'm1.nano' or "m1.nano,m1.small" * |ServerGroupAntiAffinityFilter| - This filter implements anti-affinity for a server group. First you must create a server group with a policy of 'anti-affinity' via the server groups API. Then, when you boot a new server, provide a scheduler hint of 'group=' where is the UUID of the server group you created. This will result in the server getting added to the group. When the server gets scheduled, anti-affinity will be enforced among all servers in that group. 
* |ServerGroupAffinityFilter| - This filter works the same way as ServerGroupAntiAffinityFilter. The difference is that when you create the server group, you should specify a policy of 'affinity'. * |AggregateMultiTenancyIsolation| - isolate tenants in specific aggregates. To specify multiple tenants use a comma. Eg. "tenant1,tenant2" * |AggregateImagePropertiesIsolation| - isolates hosts based on image properties and aggregate metadata. Use a comma to specify multiple values for the same property. The filter will then ensure at least one value matches. * |MetricsFilter| - filters hosts based on metrics weight_setting. Only hosts with the available metrics are passed. * |NUMATopologyFilter| - filters hosts based on the NUMA topology requested by the instance, if any. Now we can focus on these standard filter classes in some detail. We'll skip the simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter|, because their functionality is relatively simple and can be understood from the code. For example class |RamFilter| has the next realization: :: class RamFilter(filters.BaseHostFilter): """Ram Filter with over subscription flag""" def host_passes(self, host_state, filter_properties): """Only return hosts with sufficient available RAM.""" instance_type = filter_properties.get('instance_type') requested_ram = instance_type['memory_mb'] free_ram_mb = host_state.free_ram_mb total_usable_ram_mb = host_state.total_usable_ram_mb used_ram_mb = total_usable_ram_mb - free_ram_mb return total_usable_ram_mb * FLAGS.ram_allocation_ratio - used_ram_mb >= requested_ram Here ``ram_allocation_ratio`` means the virtual RAM to physical RAM allocation ratio (it is ``1.5`` by default). The |AvailabilityZoneFilter| looks at the availability zone of compute node and availability zone from the properties of the request. Each compute service has its own availability zone. So deployment engineers have an option to run scheduler with availability zones support and can configure availability zones on each compute host. This class's method ``host_passes`` returns ``True`` if availability zone mentioned in request is the same on the current compute host. The |ImagePropertiesFilter| filters hosts based on the architecture, hypervisor type and virtual machine mode specified in the instance. For example, an instance might require a host that supports the ARM architecture on a qemu compute host. The |ImagePropertiesFilter| will only pass hosts that can satisfy this request. These instance properties are populated from properties defined on the instance's image. E.g. an image can be decorated with these properties using ``glance image-update img-uuid --property architecture=arm --property hypervisor_type=qemu`` Only hosts that satisfy these requirements will pass the |ImagePropertiesFilter|. |ComputeCapabilitiesFilter| checks if the host satisfies any ``extra_specs`` specified on the instance type. The ``extra_specs`` can contain key/value pairs. The key for the filter is either non-scope format (i.e. no ``:`` contained), or scope format in capabilities scope (i.e. ``capabilities:xxx:yyy``). One example of capabilities scope is ``capabilities:cpu_info:features``, which will match host's cpu features capabilities. The |ComputeCapabilitiesFilter| will only pass hosts whose capabilities satisfy the requested specifications. All hosts are passed if no ``extra_specs`` are specified. |ComputeFilter| is quite simple and passes any host whose compute service is enabled and operational. 
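As a concrete illustration of the ``extra_specs`` matching described above, the
following key/value pairs are examples only (the capability keys that can be
matched depend on what the host actually reports); set on a flavor, they would
be evaluated by |ComputeCapabilitiesFilter| using the operators listed earlier:

::

    capabilities:cpu_info:features = <in> aes
    capabilities:hypervisor_type = s== QEMU

Such pairs can be attached to a flavor with, for example,
``nova flavor-key m1.small set "capabilities:cpu_info:features=<in> aes"``,
mirroring the ``glance image-update`` example shown for |ImagePropertiesFilter|.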
Now we are going to |IsolatedHostsFilter|. There can be some special hosts reserved for specific images. These hosts are called **isolated**. So the images to run on the isolated hosts are also called isolated. The filter checks if ``image_isolated`` flag named in instance specifications is the same as the host. Isolated hosts can run non isolated images if the flag ``restrict_isolated_hosts_to_isolated_images`` is set to false. |DifferentHostFilter| - method ``host_passes`` returns ``True`` if the host to place an instance on is different from all the hosts used by a set of instances. |SameHostFilter| does the opposite to what |DifferentHostFilter| does. ``host_passes`` returns ``True`` if the host we want to place an instance on is one of the hosts used by a set of instances. |SimpleCIDRAffinityFilter| looks at the subnet mask and investigates if the network address of the current host is in the same sub network as it was defined in the request. |JsonFilter| - this filter provides the opportunity to write complicated queries for the hosts capabilities filtering, based on simple JSON-like syntax. There can be used the following operations for the host states properties: ``=``, ``<``, ``>``, ``in``, ``<=``, ``>=``, that can be combined with the following logical operations: ``not``, ``or``, ``and``. For example, the following query can be found in tests: :: ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024] ] This query will filter all hosts with free RAM greater or equal than 1024 MB and at the same time with free disk space greater or equal than 200 GB. Many filters use data from ``scheduler_hints``, that is defined in the moment of creation of the new server for the user. The only exception for this rule is |JsonFilter|, that takes data from the schedulers ``HostState`` data structure directly. Variable naming, such as the ``$free_ram_mb`` example above, should be based on those attributes. The |RetryFilter| filters hosts that have already been attempted for scheduling. It only passes hosts that have not been previously attempted. If a compute node is raising an exception when spawning an instance, then the compute manager will reschedule it by adding the failing host to a retry dictionary so that the RetryFilter will not accept it as a possible destination. That means that if all of your compute nodes are failing, then the RetryFilter will return 0 hosts and the scheduler will raise a NoValidHost exception even if the problem is related to 1:N compute nodes. If you see that case in the scheduler logs, then your problem is most likely related to a compute problem and you should check the compute logs. The |TrustedFilter| filters hosts based on their trust. Only passes hosts that match the trust requested in the ``extra_specs`` for the flavor. The key for this filter must be scope format as ``trust:trusted_host``, where ``trust`` is the scope of the key and ``trusted_host`` is the actual key value. The value of this pair (``trusted``/``untrusted``) must match the integrity of a host (obtained from the Attestation service) before it is passed by the |TrustedFilter|. 
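As a sketch of how the ``scheduler_hints`` mentioned above reach the filters,
a server create request can carry hints in its body. The payload below is
abbreviated and purely illustrative; replace the placeholders with real image,
flavor and server identifiers:

::

    {
        "server": {
            "name": "hinted-server",
            "imageRef": "<image-uuid>",
            "flavorRef": "<flavor-id>"
        },
        "os:scheduler_hints": {
            "different_host": ["<uuid-of-an-existing-server>"]
        }
    }

The ``different_host`` hint is the one read by |DifferentHostFilter|;
|SameHostFilter| consumes a ``same_host`` hint in the same way.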
The |NUMATopologyFilter| considers the NUMA topology that was specified for the instance through the use of flavor extra_specs in combination with the image properties, as described in detail in the related nova-spec document: * http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/virt-driver-numa-placement.rst and try to match it with the topology exposed by the host, accounting for the ``ram_allocation_ratio`` and ``cpu_allocation_ratio`` for over-subscription. The filtering is done in the following manner: * Filter will attempt to pack instance cells onto host cells. * It will consider the standard over-subscription limits for each host NUMA cell, and provide limits to the compute host accordingly (as mentioned above). * If instance has no topology defined, it will be considered for any host. * If instance has a topology defined, it will be considered only for NUMA capable hosts. To use filters you specify two settings: * ``scheduler_available_filters`` - Defines filter classes made available to the scheduler. This setting can be used multiple times. * ``scheduler_default_filters`` - Of the available filters, defines those that the scheduler uses by default. The default values for these settings in nova.conf are: :: --scheduler_available_filters=nova.scheduler.filters.all_filters --scheduler_default_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter' With this configuration, all filters in ``nova.scheduler.filters`` would be available, and by default the |RamFilter|, |ComputeFilter|, |AvailabilityZoneFilter|, |ComputeCapabilitiesFilter|, |ImagePropertiesFilter|, |ServerGroupAntiAffinityFilter|, and |ServerGroupAffinityFilter| would be used. To create **your own filter** you must inherit from |BaseHostFilter| and implement one method: ``host_passes``. This method should return ``True`` if a host passes the filter. It takes ``host_state`` (describing the host) and ``filter_properties`` dictionary as the parameters. As an example, nova.conf could contain the following scheduler-related settings: :: --scheduler_driver=nova.scheduler.FilterScheduler --scheduler_available_filters=nova.scheduler.filters.all_filters --scheduler_available_filters=myfilter.MyFilter --scheduler_default_filters=RamFilter,ComputeFilter,MyFilter With these settings, nova will use the ``FilterScheduler`` for the scheduler driver. The standard nova filters and MyFilter are available to the FilterScheduler. The RamFilter, ComputeFilter, and MyFilter are used by default when no filters are specified in the request. Each filter selects hosts in a different way and has different costs. The order of ``scheduler_default_filters`` affects scheduling performance. The general suggestion is to filter out invalid hosts as soon as possible to avoid unnecessary costs. We can sort ``scheduler_default_filters`` items by their costs in reverse order. For example, ComputeFilter is better before any resource calculating filters like RamFilter, CoreFilter. In medium/large environments having AvailabilityZoneFilter before any capability or resource calculating filters can be useful. Weights ------- Filter Scheduler uses the so-called **weights** during its work. A weigher is a way to select the best suitable host from a group of valid hosts by giving weights to all the hosts in the list. 
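Which weighers run, and how strongly each one counts, is driven by
configuration. A minimal ``nova.conf`` sketch (``ram_weight_multiplier`` is the
in-tree knob for |RAMWeigher|; treat the exact option names as illustrative):

::

    [DEFAULT]
    scheduler_weight_classes = nova.scheduler.weights.all_weighers
    # positive values spread instances across hosts, negative values stack them
    ram_weight_multiplier = 1.0

How such multipliers are combined with the normalized weights is described
next.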
In order to prioritize one weigher against another, all the weighers have to
define a multiplier that will be applied before computing the weight for a
node. All the weights are normalized beforehand so that the multiplier can be
applied easily. Therefore the final weight for the object will be::

    weight = w1_multiplier * norm(w1) + w2_multiplier * norm(w2) + ...

A weigher should be a subclass of ``weights.BaseHostWeigher``. It can either
implement both the ``weight_multiplier`` and ``_weight_object`` methods, or
implement just the ``weight_objects`` method. The ``weight_objects`` method
should be overridden only if you need access to all objects in order to
calculate the weights; it should simply return a list of weights, and not
modify the weight of each object directly, since the final weights are
normalized and computed by ``weights.BaseWeightHandler``.

The Filter Scheduler weighs hosts based on the config option
``scheduler_weight_classes``, which defaults to
``nova.scheduler.weights.all_weighers`` and selects the following weighers:

* |RAMWeigher| Computes the weight based on the available RAM on the compute
  node. Hosts are sorted with the largest weight winning. If the multiplier
  is negative, the host with the least available RAM wins (useful for
  stacking hosts instead of spreading).
* |DiskWeigher| Hosts are weighted and sorted by free disk space, with the
  largest weight winning. If the multiplier is negative, the host with the
  least available disk space wins (useful for stacking hosts instead of
  spreading).
* |MetricsWeigher| This weigher computes the weight based on various metrics
  of the compute node host. The metrics to be weighed and their weighting
  ratios are specified in the configuration file as follows::

      metrics_weight_setting = name1=1.0, name2=-1.0

* |IoOpsWeigher| This weigher computes the weight based on the compute node
  host's workload. The default is to prefer lightly loaded compute hosts. If
  the multiplier is positive, the weigher prefers heavily loaded compute
  hosts, i.e. the opposite of the default behaviour.
* |ServerGroupSoftAffinityWeigher| This weigher computes the weight based on
  the number of instances from the same server group already running on a
  host. The largest weight defines the preferred host for the new instance.
  Only a positive multiplier is meaningful for the calculation, as a negative
  value would mean that the affinity weigher prefers non-collocated
  placement.
* |ServerGroupSoftAntiAffinityWeigher| This weigher computes the weight based
  on the number of instances from the same server group already running on a
  host, taken as a negative value. The largest weight defines the preferred
  host for the new instance. Only a positive multiplier is meaningful for the
  calculation, as a negative value would mean that the anti-affinity weigher
  prefers collocated placement.

Filter Scheduler makes a local list of acceptable hosts by repeated filtering
and weighing. Each time it chooses a host, it virtually consumes resources on
it, so subsequent selections can adjust accordingly. This is useful when the
customer asks for a large block of instances, because a weight is computed
for each instance requested.

.. image:: ./images/filteringWorkflow2.png

In the end, Filter Scheduler sorts the selected hosts by their weight and
attempts to provision instances on the chosen hosts.

P.S.: you can find more examples of using Filter Scheduler and standard
filters in :mod:`nova.tests.scheduler`.

.. |AllHostsFilter| replace:: :class:`AllHostsFilter ` ..
|ImagePropertiesFilter| replace:: :class:`ImagePropertiesFilter ` .. |AvailabilityZoneFilter| replace:: :class:`AvailabilityZoneFilter ` .. |BaseHostFilter| replace:: :class:`BaseHostFilter ` .. |ComputeCapabilitiesFilter| replace:: :class:`ComputeCapabilitiesFilter ` .. |ComputeFilter| replace:: :class:`ComputeFilter ` .. |CoreFilter| replace:: :class:`CoreFilter ` .. |AggregateCoreFilter| replace:: :class:`AggregateCoreFilter ` .. |IsolatedHostsFilter| replace:: :class:`IsolatedHostsFilter ` .. |JsonFilter| replace:: :class:`JsonFilter ` .. |RamFilter| replace:: :class:`RamFilter ` .. |AggregateRamFilter| replace:: :class:`AggregateRamFilter ` .. |DiskFilter| replace:: :class:`DiskFilter ` .. |AggregateDiskFilter| replace:: :class:`AggregateDiskFilter ` .. |NumInstancesFilter| replace:: :class:`NumInstancesFilter ` .. |AggregateNumInstancesFilter| replace:: :class:`AggregateNumInstancesFilter ` .. |IoOpsFilter| replace:: :class:`IoOpsFilter ` .. |AggregateIoOpsFilter| replace:: :class:`AggregateIoOpsFilter ` .. |PciPassthroughFilter| replace:: :class:`PciPassthroughFilter ` .. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter ` .. |DifferentHostFilter| replace:: :class:`DifferentHostFilter ` .. |SameHostFilter| replace:: :class:`SameHostFilter ` .. |RetryFilter| replace:: :class:`RetryFilter ` .. |TrustedFilter| replace:: :class:`TrustedFilter ` .. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter ` .. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter ` .. |ServerGroupAntiAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter ` .. |ServerGroupAffinityFilter| replace:: :class:`ServerGroupAffinityFilter ` .. |AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter ` .. |AggregateMultiTenancyIsolation| replace:: :class:`AggregateMultiTenancyIsolation ` .. |NUMATopologyFilter| replace:: :class:`NUMATopologyFilter ` .. |RAMWeigher| replace:: :class:`RAMWeigher ` .. |AggregateImagePropertiesIsolation| replace:: :class:`AggregateImagePropertiesIsolation ` .. |MetricsFilter| replace:: :class:`MetricsFilter ` .. |MetricsWeigher| replace:: :class:`MetricsWeigher ` .. |IoOpsWeigher| replace:: :class:`IoOpsWeigher ` .. |ServerGroupSoftAffinityWeigher| replace:: :class:`ServerGroupSoftAffinityWeigher ` .. |ServerGroupSoftAntiAffinityWeigher| replace:: :class:`ServerGroupSoftAntiAffinityWeigher ` .. |DiskWeigher| replace:: :class:`DiskWeigher ` nova-13.1.4/doc/source/services.rst0000664000567000056710000000455313064447140020340 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _service_manager_driver: Services, Managers and Drivers ============================== The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to nova. 
This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. As long as the path specified by the flag leads to an object (or a callable that returns an object) that responds to getattr, it should work as a manager or driver. The :mod:`nova.service` Module ------------------------------ .. automodule:: nova.service :noindex: :members: :undoc-members: :show-inheritance: The :mod:`nova.manager` Module ------------------------------ .. automodule:: nova.manager :noindex: :members: :undoc-members: :show-inheritance: Implementation-Specific Drivers ------------------------------- A manager will generally load a driver for some of its tasks. The driver is responsible for specific implementation details. Anything running shell commands on a host, or dealing with other non-python code should probably be happening in a driver. Drivers should minimize touching the database, although it is currently acceptable for implementation specific data. This may be reconsidered at some point. It usually makes sense to define an Abstract Base Class for the specific driver (i.e. VolumeDriver), to define the methods that a different driver would need to implement. nova-13.1.4/doc/source/_static/0000775000567000056710000000000013064447471017411 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/_static/support-matrix.css0000664000567000056710000000117613064447140023137 0ustar jenkinsjenkins00000000000000 .sp_feature_mandatory { font-weight: bold; } .sp_feature_optional { } .sp_feature_choice { font-style: italic; font-weight: bold; } .sp_feature_condition { font-style: italic; font-weight: bold; } .sp_impl_complete { color: rgb(0, 120, 0); font-weight: normal; } .sp_impl_missing { color: rgb(120, 0, 0); font-weight: normal; } .sp_impl_partial { color: rgb(170, 170, 0); font-weight: normal; } .sp_impl_unknown { color: rgb(170, 170, 170); font-weight: normal; } .sp_impl_summary { font-size: 2em; } .sp_cli { font-family: monospace; background-color: #F5F5F5; }nova-13.1.4/doc/source/support-matrix.ini0000664000567000056710000011724413064447152021507 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # # ========================================= # Nova Hypervisor Feature Capability Matrix # ========================================= # # This obsoletes the information previously at # # https://wiki.openstack.org/wiki/HypervisorSupportMatrix # # This file contains a specification of what feature capabilities each # hypervisor driver in Nova is able to support. Feature capabilities include # what API operations are supported, what storage / networking features can be # used and what aspects of the guest machine can be configured. 
The capabilities # can be considered to be structured into nested groups, but in this file they # have been flattened for ease of representation. The section names represent # the group structure. At the top level there are the following groups defined # # - operation - public API operations # - storage - host storage configuration options # - networking - host networking configuration options # - guest - guest hardware configuration options # # When considering which capabilities should be marked as mandatory, # consider the general guiding principles listed in the support-matrix.rst # file # # The 'status' field takes possible values # # - mandatory - unconditionally required to be implemented # - optional - optional to support, nice to have # - choice(group) - at least one of the options within the named group # must be implemented # - conditional(cond) - required, if the referenced condition is met. # # The value against each 'driver-impl-XXXX' entry refers to the level # of the implementation of the feature in that driver # # - complete - fully implemented, expected to work at all times # - partial - implemented, but with caveats about when it will work # eg some configurations or hardware or guest OS may not # support it # - missing - not implemented at all # # In the case of the driver being marked as 'partial', then # 'driver-notes-XXX' entry should be used to explain the caveats # around the implementation. # # The 'cli' field takes a list of nova client commands, separated by semicolon. # These CLi commands are related to that feature. # Example: # cli=nova list;nova show # [targets] # List of driver impls we are going to record info for later # This list only covers drivers that are in the Nova source # tree. Out of tree drivers should maintain their own equivalent # document, and merge it with this when their code merges into # Nova core. driver-impl-xenserver=XenServer driver-impl-libvirt-kvm-x86=Libvirt KVM (x86) driver-impl-libvirt-kvm-ppc64=Libvirt KVM (ppc64) driver-impl-libvirt-kvm-s390x=Libvirt KVM (s390x) driver-impl-libvirt-qemu-x86=Libvirt QEMU (x86) driver-impl-libvirt-lxc=Libvirt LXC driver-impl-libvirt-xen=Libvirt Xen driver-impl-libvirt-vz-vm=Libvirt Virtuozzo VM driver-impl-libvirt-vz-ct=Libvirt Virtuozzo CT driver-impl-vmware=VMware vCenter driver-impl-hyperv=Hyper-V driver-impl-ironic=Ironic [operation.attach-volume] title=Attach block volume to instance status=optional notes=The attach volume operation provides a means to hotplug additional block storage to a running instance. This allows storage capabilities to be expanded without interruption of service. In a cloud model it would be more typical to just spin up a new instance with large storage, so the ability to hotplug extra storage is for those cases where the instance is considered to be more of a pet than cattle. Therefore this operation is not considered to be mandatory to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.detach-volume] title=Detach block volume from instance status=optional notes=See notes for attach volume operation. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.maintenance-mode] title=Set the host in a maintenance mode status=optional notes=This operation allows a host to be placed into maintenance mode, automatically triggering migration of any running instances to an alternative host and preventing new instances from being launched. This is not considered to be a mandatory operation to support. The CLI command is "nova host-update ". The driver methods to implement are "host_maintenance_mode" and "set_host_enabled". cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=missing driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=missing driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.evacuate] title=Evacuate instances from a host status=optional notes=A possible failure scenario in a cloud environment is the outage of one of the compute nodes. In such a case the instances of the down host can be evacuated to another host. It is assumed that the old host is unlikely ever to be powered back on, otherwise the evacuation attempt will be rejected. When the instances get moved to the new host, their volumes get re-attached and the locally stored data is dropped. That happens in the same way as a rebuild. This is not considered to be a mandatory operation to support. cli=nova evacuate ;nova host-evacuate driver-impl-xenserver=unknown driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=unknown driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=unknown driver-impl-libvirt-lxc=unknown driver-impl-libvirt-xen=unknown driver-impl-vmware=unknown driver-impl-hyperv=unknown driver-impl-ironic=unknown driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.get-guest-info] title=Guest instance status status=mandatory notes=Provides a quick report on information about the guest instance, including the power state, memory allocation, CPU allocation, number of vCPUs and cummulative CPU execution time. As well as being informational, the power state is used by the compute manager for tracking changes in guests. Therefore this operation is considered mandatory to support. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.get-host-info] title=Guest host status status=optional notes=Unclear what this refers to cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.live-migrate] title=Live migrate instance across hosts status=optional notes=Live migration provides a way to move an instance off one compute host, to another compute host. Administrators may use this to evacuate instances from a host that needs to undergo maintenance tasks, though of course this may not help if the host is already suffering a failure. In general instances are considered cattle rather than pets, so it is expected that an instance is liable to be killed if host maintenance is required. It is technically challenging for some hypervisors to provide support for the live migration operation, particularly those built on the container based virtualization. Therefore this operation is not considered mandatory to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-notes-vmware=https://bugs.launchpad.net/nova/+bug/1192192 driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.launch] title=Launch instance status=mandatory notes=Importing pre-existing running virtual machines on a host is considered out of scope of the cloud paradigm. Therefore this operation is mandatory to support in drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.pause] title=Stop instance CPUs (pause) status=optional notes=Stopping an instances CPUs can be thought of as roughly equivalent to suspend-to-RAM. The instance is still present in memory, but execution has stopped. The problem, however, is that there is no mechanism to inform the guest OS that this takes place, so upon unpausing, its clocks will no longer report correct time. For this reason hypervisor vendors generally discourage use of this feature and some do not even implement it. Therefore this operation is considered optional to support in drivers. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.reboot] title=Reboot instance status=optional notes=It is reasonable for a guest OS administrator to trigger a graceful reboot from inside the instance. A host initiated graceful reboot requires guest co-operation and a non-graceful reboot can be achieved by a combination of stop+start. Therefore this operation is considered optional. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.rescue] title=Rescue instance status=optional notes=The rescue operation starts an instance in a special configuration whereby it is booted from an special root disk image. The goal is to allow an administrator to recover the state of a broken virtual machine. In general the cloud model considers instances to be cattle, so if an instance breaks the general expectation is that it be thrown away and a new instance created. Therefore this operation is considered optional to support in drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.resize] title=Resize instance status=optional notes=The resize operation allows the user to change a running instance to match the size of a different flavor from the one it was initially launched with. There are many different flavor attributes that potentially need to be updated. In general it is technically challenging for a hypervisor to support the alteration of all relevant config settings for a running instance. Therefore this operation is considered optional to support in drivers. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=partial driver-notes-ironic=Only certain ironic drivers support this driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.resume] title=Restore instance status=optional notes=See notes for the suspend operation cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.service-control.wtf.com] title=Service control status=optional notes=Something something, dark side, something something. Hard to claim this is mandatory when no one seems to know what "Service control" refers to in the context of virt drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.set-admin-password] title=Set instance admin password status=optional notes=Provides a mechanism to re(set) the password of the administrator account inside the instance operating system. This requires that the hypervisor has a way to communicate with the running guest operating system. Given the wide range of operating systems in existence it is unreasonable to expect this to be practical in the general case. The configdrive and metadata service both provide a mechanism for setting the administrator password at initial boot time. In the case where this operation were not available, the administrator would simply have to login to the guest and change the password in the normal manner, so this is just a convenient optimization. Therefore this operation is not considered mandatory for drivers to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-notes-libvirt-kvm-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-notes-libvirt-qemu-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.snapshot] title=Save snapshot of instance disk status=optional notes=The snapshot operation allows the current state of the instance root disk to be saved and uploaded back into the glance image repository. The instance can later be booted again using this saved image. This is in effect making the ephemeral instance root disk into a semi-persistent storage, in so much as it is preserved even though the guest is no longer running. 
In general though, the expectation is that the root disks are ephemeral so the ability to take a snapshot cannot be assumed. Therefore this operation is not considered mandatory to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=partial driver-notes-libvirt-xen=Only cold snapshots (pause + snapshot) supported driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.suspend] title=Suspend instance status=optional notes=Suspending an instance can be thought of as roughly equivalent to suspend-to-disk. The instance no longer consumes any RAM or CPUs, with its live running state having been preserved in a file on disk. It can later be restored, at which point it should continue execution where it left off. As with stopping instance CPUs, it suffers from the fact that the guest OS will typically be left with a clock that is no longer telling correct time. For container based virtualization solutions, this operation is particularly technically challenging to implement and is an area of active research. This operation tends to make more sense when thinking of instances as pets, rather than cattle, since with cattle it would be simpler to just terminate the instance instead of suspending. Therefore this operation is considered optional to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.swap-volume] title=Swap block volumes status=optional notes=The swap volume operation is a mechanism for changing running instance so that its attached volume(s) are backed by different storage in the host. An alternative to this would be to simply terminate the existing instance and spawn a new instance with the new storage. In other words this operation is primarily targeted towards the pet use case rather than cattle. Therefore this is considered optional to support. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [operation.terminate] title=Shutdown instance status=mandatory notes=The ability to terminate a virtual machine is required in order for a cloud user to stop utilizing resources and thus avoid indefinitely ongoing billing. Therefore this operation is mandatory to support in drivers. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-notes-libvirt-lxc=Fails in latest Ubuntu Trusty kernel from security repository (3.13.0-76-generic), but works in upstream 3.13.x kernels as well as default Ubuntu Trusty latest kernel (3.13.0-58-generic). driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.trigger-crash-dump] title=Trigger crash dump status=optional notes=The trigger crash dump operation is a mechanism for triggering a crash dump in an instance. The feature is typically implemented by injecting an NMI (Non-maskable Interrupt) into the instance. It provides a means to dump the production memory image as a dump file which is useful for users. Therefore this operation is considered optional to support. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [operation.unpause] title=Resume instance CPUs (unpause) status=optional notes=See notes for the "Stop instance CPUs" operation cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [guest.disk.autoconfigure.wtf.com] title=Auto configure disk status=optional notes=something something, dark side, something something. Unclear just what this is about. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=missing driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=missing driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [guest.disk.rate-limit] title=Instance disk I/O limits status=optional notes=The ability to set rate limits on virtual disks allows for greater performance isolation between instances running on the same host storage. It is valid to delegate scheduling of I/O operations to the hypervisor with its default settings, instead of doing fine grained tuning. Therefore this is not considered to be an mandatory configuration to support. 
cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [guest.setup.configdrive] title=Config drive support status=choice(guest.setup) notes=The config drive provides an information channel into the guest operating system, to enable configuration of the administrator password, file injection, registration of SSH keys, etc. Since cloud images typically ship with all login methods locked, a mechanism to set the administrator password of keys is required to get login access. Alternatives include the metadata service and disk injection. At least one of the guest setup mechanisms is required to be supported by drivers, in order to enable login access. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [guest.setup.inject.file] title=Inject files into disk image status=optional notes=This allows for the end user to provide data for multiple files to be injected into the root filesystem before an instance is booted. This requires that the compute node understand the format of the filesystem and any partitioning scheme it might use on the block device. This is a non-trivial problem considering the vast number of filesystems in existence. The problem of injecting files to a guest OS is better solved by obtaining via the metadata service or config drive. Therefore this operation is considered optional to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [guest.setup.inject.networking] title=Inject guest networking config status=optional notes=This allows for static networking configuration (IP address, netmask, gateway and routes) to be injected directly into the root filesystem before an instance is booted. This requires that the compute node understand how networking is configured in the guest OS which is a non-trivial problem considering the vast number of operating system types. The problem of configuring networking is better solved by DHCP or by obtaining static config via the metadata service or config drive. Therefore this operation is considered optional to support. 
cli= driver-impl-xenserver=partial driver-notes-xenserver=Only for Debian derived guests driver-impl-libvirt-kvm-x86=partial driver-notes-libvirt-kvm-x86=Only for Debian derived guests driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=partial driver-notes-libvirt-qemu-x86=Only for Debian derived guests driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=partial driver-notes-vmware=requires vmware tools installed driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.rdp] title=Remote desktop over RDP status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via RDP. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=missing driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=missing driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.serial.log] title=View serial console logs status=choice(console) notes=This allows the administrator to query the logs of data emitted by the guest OS on its virtualized serial port. For UNIX guests this typically includes all boot up messages and so is useful for diagnosing problems when an instance fails to successfully boot. Not all guest operating systems will be able to emit boot information on a serial console, others may only support graphical consoles. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.serial.interactive] title=Remote interactive serial console status=choice(console) notes=This allows the administrator to interact with the serial console of the guest OS. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Not all guest operating systems will be able to emit boot information on a serial console, others may only support graphical consoles. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. 
This feature was introduced in the Juno release with blueprint https://blueprints.launchpad.net/nova/+spec/serial-ports cli=nova get-serial-console driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=unknown driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=unknown driver-impl-libvirt-lxc=unknown driver-impl-libvirt-xen=unknown driver-impl-vmware=missing driver-impl-hyperv=missing driver-notes-hyperv=Will be complete when this review is merged: https://review.openstack.org/#/c/145004/ driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.spice] title=Remote desktop over SPICE status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via SPICE. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing [console.vnc] title=Remote desktop over VNC status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via VNC. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [storage.block] title=Block storage support status=optional notes=Block storage provides instances with direct attached virtual disks that can be used for persistent storage of data. As an alternative to direct attached disks, an instance may choose to use network based persistent storage. OpenStack provides object storage via the Swift service, or a traditional filesystem such as as NFS/GlusterFS may be used. Some types of instances may not require persistent storage at all, being simple transaction processing systems reading requests & sending results to and from the network. Therefore support for this configuration is not considered mandatory for drivers to support. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=partial driver-impl-libvirt-vz-ct=missing [storage.block.backend.fibrechannel] title=Block storage over fibre channel status=optional notes=To maximise performance of the block storage, it may be desirable to directly access fibre channel LUNs from the underlying storage technology on the compute hosts. Since this is just a performance optimization of the I/O path it is not considered mandatory to support. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [storage.block.backend.iscsi] title=Block storage over iSCSI status=condition(storage.block==complete) notes=If the driver wishes to support block storage, it is common to provide an iSCSI based backend to access the storage from cinder. This isolates the compute layer for knowledge of the specific storage technology used by Cinder, albeit at a potential performance cost due to the longer I/O path involved. If the driver chooses to support block storage, then this is considered mandatory to support, otherwise it is considered optional. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [storage.block.backend.iscsi.auth.chap] title=CHAP authentication for iSCSI status=optional notes=If accessing the cinder iSCSI service over an untrusted LAN it is desirable to be able to enable authentication for the iSCSI protocol. CHAP is the commonly used authentication protocol for iSCSI. This is not considered mandatory to support. (?) cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=missing [storage.image] title=Image storage support status=mandatory notes=This refers to the ability to boot an instance from an image stored in the glance image repository. Without this feature it would not be possible to bootstrap from a clean environment, since there would be no way to get block volumes populated and reliance on external PXE servers is out of scope. Therefore this is considered a mandatory storage feature to support. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.firewallrules] title=Network firewall rules status=optional notes=Unclear how this is different from security groups cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.routing] title=Network routing status=optional notes=Unclear what this refers to cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.securitygroups] title=Network security groups status=optional notes=The security groups feature provides a way to define rules to isolate the network traffic of different instances running on a compute host. This would prevent actions such as MAC and IP address spoofing, or the ability to setup rogue DHCP servers. In a private cloud environment this may be considered to be a superfluous requirement. Thereforce this is considered to be an optional configuration to support. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=partial driver-notes-vmware=This is supported by the Neutron NSX plugins driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.topology.flat] title=Flat networking status=choice(networking.topology) notes=Provide network conenctivity to guests using a flat topology across all compute nodes. At least one of the networking configurations is mandatory to support in the drivers. cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=complete driver-impl-ironic=complete driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [networking.topology.vlan] title=VLAN networking status=choice(networking.topology) notes=Provide network connectivity to guests using VLANs to define the topology. At least one of the networking configurations is mandatory to support in the drivers. 
cli= driver-impl-xenserver=complete driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=complete driver-impl-libvirt-kvm-s390x=complete driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=complete driver-impl-libvirt-xen=complete driver-impl-vmware=complete driver-impl-hyperv=missing driver-impl-ironic=missing driver-impl-libvirt-vz-vm=complete driver-impl-libvirt-vz-ct=complete [operation.uefi-boot] title=uefi boot status=optional notes=This allows users to boot a guest with uefi firmware. cli= driver-impl-xenserver=missing driver-impl-libvirt-kvm-x86=complete driver-impl-libvirt-kvm-ppc64=missing driver-impl-libvirt-kvm-s390x=missing driver-impl-libvirt-qemu-x86=complete driver-impl-libvirt-lxc=missing driver-impl-libvirt-xen=missing driver-impl-vmware=missing driver-impl-hyperv=missing driver-impl-ironic=partial driver-notes-ironic=depends on hardware support driver-impl-libvirt-vz-vm=missing driver-impl-libvirt-vz-ct=missing nova-13.1.4/doc/source/conf.py0000664000567000056710000002214413064447152017261 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # nova documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import subprocess import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'ext.nova_todo', 'sphinx.ext.coverage', 'sphinx.ext.graphviz', 'oslosphinx', "ext.support_matrix", 'oslo_config.sphinxconfiggen', 'ext.versioned_notifications' ] config_generator_config_file = '../../etc/nova/nova-config-generator.conf' sample_config_basename = '_static/nova' todo_include_todos = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'nova' copyright = u'2010-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # from nova.version import version_info # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # A list of glob-style patterns that should be excluded when looking for # source files. 
They are matched against the source file names relative to the # source directory, using slashes as directory separators on all platforms. exclude_patterns = [ 'api/nova.wsgi.nova-*', 'api/nova.tests.*', ] # The reST default role (used for this markup: `text`) to use # for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['nova.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/nova-all', 'nova-all', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-api-os-compute', 'nova-api-os-compute', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-api', 'nova-api', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-cells', 'nova-cells', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-cert', 'nova-cert', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-compute', 'nova-compute', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-console', 'nova-console', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-idmapshift', 'nova-idmapshift', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-manage', 'nova-manage', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-network', 'nova-network', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-serialproxy', 'nova-serialproxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric', [u'OpenStack'], 1), ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'novadoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Nova.tex', u'Nova Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True nova-13.1.4/doc/source/conductor.rst0000664000567000056710000000617213064447140020514 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Conductor as a place for orchestrating tasks ============================================ In addition to its roles as a database proxy and object backporter, the conductor service also serves as a centralized place to manage the execution of workflows which involve the scheduler. Rebuild, resize/migrate, and building an instance are managed here. This was done in order to have a better separation of responsibilities between what compute nodes should handle and what the scheduler should handle, and to clean up the path of execution. Conductor was chosen because, in order to query the scheduler in a synchronous manner, the query needed to happen after the API had returned a response, otherwise API response times would increase. And changing the scheduler call from asynchronous to synchronous helped to clean up the code. To illustrate this, the old process for building an instance was: * API receives request to build an instance. * API sends an RPC cast to the scheduler to pick a compute. * Scheduler sends an RPC cast to the compute to build the instance, which means the scheduler needs to be able to communicate with all computes. * If the build succeeds it stops here. * If the build fails then the compute decides if the max number of scheduler retries has been hit. If so, the build stops there. * If the build should be rescheduled the compute sends an RPC cast to the scheduler in order to pick another compute. This was overly complicated and meant that the logic for scheduling/rescheduling was distributed throughout the code. The answer to this was to change the process to the following: * API receives request to build an instance. * API sends an RPC cast to the conductor to build an instance. (or runs locally if conductor is configured to use local_mode) * Conductor sends an RPC call to the scheduler to pick a compute and waits for the response. If there is a scheduler failure, the build stops at the conductor. * Conductor sends an RPC cast to the compute to build the instance. * If the build succeeds it stops here. * If the build fails then the compute sends an RPC cast to the conductor to build an instance. This is the same RPC message that was sent by the API. This new process means the scheduler only deals with scheduling, the compute only deals with building an instance, and the conductor manages the workflow. The code is now cleaner in the scheduler and computes. The resize/migrate process has not yet been fully converted to a style that takes advantage of what conductor can provide, so expect that this will change over time. nova-13.1.4/doc/source/architecture.rst0000664000567000056710000000567413064447140021204 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Nova System Architecture ======================== Nova is comprised of multiple server processes, each performing different functions.
The user-facing interface is a REST API, while internally Nova components communicate via an RPC message passing mechanism. The API servers process REST requests, which typically involve database reads/writes, optionally sending RPC messages to other Nova services, and generating responses to the REST calls. RPC messaging is done via the **oslo.messaging** library, an abstraction on top of message queues. Most of the major nova components can be run on multiple servers, and have a `manager` that is listening for `RPC` messages. The one major exception is nova-compute, where a single process runs on the hypervisor it is managing (except when using the VMware or Ironic drivers). The manager also, optionally, has periodic tasks. For more details on our `RPC` system, please see: :doc:`rpc` Nova also uses a central database that is (logically) shared between all components. However, to aid upgrade, the DB is accessed through an object layer that ensures an upgraded control plane can still communicate with a nova-compute running the previous release. To make this possible, nova-compute proxies DB requests over `RPC` to a central manager called `nova-conductor`. To horizontally expand Nova deployments, we have a deployment sharding concept called cells. For more information please see: :doc:`cells` Components ---------- Below you will find a helpful explanation of the key components of a typical (non-cells v1) Nova deployment. .. image:: ./images/architecture.svg :width: 100% * DB: SQL database for data storage. * API: component that receives HTTP requests, converts commands and communicates with other components via the **oslo.messaging** queue or HTTP * Scheduler: decides which host gets each instance * Network: manages IP forwarding, bridges, and VLANs * Compute: manages communication with hypervisor and virtual machines. * Conductor: handles requests that need coordination (build/resize), acts as a database proxy, or handles object conversions. While all services are designed to be horizontally scalable, you should have significantly more computes than anything else. nova-13.1.4/doc/source/cells.rst0000664000567000056710000002051013064447152017611 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======= Cells ======= Cells V1 ======== Historically, Nova has depended on a single logical database and message queue that all nodes depend on for communication and data persistence. This becomes an issue for deployers as scaling and providing fault tolerance for these systems is difficult. We have an experimental feature in Nova called "cells", hereafter referred to as "cells v1", which is used by some large deployments to partition compute nodes into smaller groups, coupled with a database and queue. This seems to be a well-liked and easy-to-understand arrangement of resources, but the implementation of it has issues for maintenance and correctness. See `Comparison with Cells V1`_ for more detail. Status ~~~~~~ Cells v1 is considered experimental and receives much less testing than the rest of Nova.
For example, there is no job for testing cells v1 with Neutron. The priority for the core team is implementation of and migration to cells v2. Because of this, there are a few restrictions placed on cells v1: #. Cells v1 is in feature freeze. This means no new feature proposals for cells v1 will be accepted by the core team, which includes but is not limited to API parity, e.g. supporting virtual interface attach/detach with Neutron. #. Latent bugs caused by the cells v1 design will not be fixed, e.g. `bug 1489581 `_. So if new tests are added to Tempest which trigger a latent bug in cells v1 it may not be fixed. However, regressions in working function should be tracked with bugs and fixed. **Suffice it to say, new deployments of cells v1 are not encouraged.** The restrictions above are basically meant to prioritize effort and focus on getting cells v2 completed, and feature requests and hard to fix latent bugs detract from that effort. Further discussion on this can be found in the `2015/11/12 Nova meeting minutes `_. There are no plans to remove Cells V1 until V2 is usable by existing deployments and there is a migration path. Cells V2 ======== Manifesto ~~~~~~~~~ Proposal -------- Right now, when a request hits the Nova API for a particular instance, the instance information is fetched from the database, which contains the hostname of the compute node on which the instance currently lives. If the request needs to take action on the instance (which is most of them), the hostname is used to calculate the name of a queue, and a message is written there which finds its way to the proper compute node. The meat of this proposal is changing the above hostname lookup into two parts that yield three pieces of information instead of one. Basically, instead of merely looking up the *name* of the compute node on which an instance lives, we will also obtain database and queue connection information. Thus, when asked to take action on instance $foo, we will: 1. Lookup the three-tuple of (database, queue, hostname) for that instance 2. Connect to that database and fetch the instance record 3. Connect to the queue and send the message to the proper hostname queue The above differs from the current organization in two ways. First, we need to do two database lookups before we know where the instance lives. Second, we need to demand-connect to the appropriate database and queue. Both of these have performance implications, but we believe we can mitigate the impacts through the use of things like a memcache of instance mapping information and pooling of connections to database and queue systems. The number of cells will always be much smaller than the number of instances. There are availability implications with this change since something like a 'nova list' which might query multiple cells could end up with a partial result if there is a database failure in a cell. A database failure within a cell would cause larger issues than a partial list result so the expectation is that it would be addressed quickly and cellsv2 will handle it by indicating in the response that the data may not be complete. Since this is very similar to what we have with current cells, in terms of organization of resources, we have decided to call this "cellsv2" for disambiguation. After this work is complete there will no longer be a "no cells" deployment. The default installation of Nova will be a single cell setup. 
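To make the three-step lookup described in this proposal concrete, here is a minimal, self-contained sketch. It is illustrative only and not actual Nova code: the ``INSTANCE_MAPPINGS`` index, the ``CELL_DATABASES`` dictionary and the ``send_to_compute`` helper are hypothetical stand-ins for the API-level mapping table, the per-cell databases and the per-cell message queues.

.. code-block:: python

    from collections import namedtuple

    CellMapping = namedtuple('CellMapping', ['db_connection', 'mq_connection'])

    # Hypothetical API-level index: instance uuid -> cell mapping.
    INSTANCE_MAPPINGS = {
        'foo': CellMapping('mysql://cell1-db/nova', 'rabbit://cell1-mq'),
    }

    # Hypothetical per-cell instance tables, keyed by database connection.
    CELL_DATABASES = {
        'mysql://cell1-db/nova': {'foo': {'uuid': 'foo', 'host': 'compute-7'}},
    }

    def send_to_compute(mq_connection, host, message):
        # Stand-in for an oslo.messaging cast onto the per-host compute queue.
        print('cast via %s to compute.%s: %s' % (mq_connection, host, message))

    def reboot_instance(instance_uuid):
        # 1. Look up the three-tuple of (database, queue, hostname); the first
        #    two come from the mapping, the hostname from the instance record.
        mapping = INSTANCE_MAPPINGS[instance_uuid]

        # 2. "Connect" to that cell's database and fetch the instance record.
        instance = CELL_DATABASES[mapping.db_connection][instance_uuid]

        # 3. "Connect" to that cell's queue and send the message to the proper
        #    per-host queue.
        send_to_compute(mapping.mq_connection, instance['host'],
                        {'method': 'reboot_instance', 'uuid': instance_uuid})

    reboot_instance('foo')

A real deployment would, as noted above, pool the database and queue connections and cache the mapping lookups rather than resolving them from scratch on every request.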
Benefits -------- The benefits of this new organization are: * Native sharding of the database and queue as a first-class-feature in nova. All of the code paths will go through the lookup procedure and thus we won't have the same feature parity issues as we do with current cells. * No high-level replication of all the cell databases at the top. The API will need a database of its own for things like the instance index, but it will not need to replicate all the data at the top level. * It draws a clear line between global and local data elements. Things like flavors and keypairs are clearly global concepts that need only live at the top level. Providing this separation allows compute nodes to become even more stateless and insulated from things like deleted/changed global data. * Existing non-cells users will suddenly gain the ability to spawn a new "cell" from their existing deployment without changing their architecture. Simply adding information about the new database and queue systems to the new index will allow them to consume those resources. * Existing cells users will need to fill out the cells mapping index, shutdown their existing cells synchronization service, and ultimately clean up their top level database. However, since the high-level organization is not substantially different, they will not have to re-architect their systems to move to cellsv2. * Adding new sets of hosts as a new "cell" allows them to be plugged into a deployment and tested before allowing builds to be scheduled to them. Comparison with Cells V1 ------------------------ In reality, the proposed organization is nearly the same as what we currently have in cells today. A cell mostly consists of a database, queue, and set of compute nodes. The primary difference is that current cells require a nova-cells service that synchronizes information up and down from the top level to the child cell. Additionally, there are alternate code paths in compute/api.py which handle routing messages to cells instead of directly down to a compute host. Both of these differences are relevant to why we have a hard time achieving feature and test parity with regular nova (because many things take an alternate path with cells) and why it's hard to understand what is going on (all the extra synchronization of data). The new proposed cellsv2 organization avoids both of these problems by letting things live where they should, teaching nova to natively find the right db, queue, and compute node to handle a given request. Database split ~~~~~~~~~~~~~~ As mentioned above there is a split between global data and data that is local to a cell. The following is a breakdown of what data can uncontroversially considered global versus local to a cell. Missing data will be filled in as consensus is reached on the data that is more difficult to cleanly place. The missing data is mostly concerned with scheduling and networking. 
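As a rough illustration of how code paths might consume this split, the following hedged sketch (again, not Nova code; the dictionaries and helpers are hypothetical) routes a flavor read to the single API-level database and an instance read to the owning cell's database, mirroring the table placement listed below.

.. code-block:: python

    # Illustrative only -- hypothetical stand-ins for the API-level database
    # and the per-cell databases described in this section.
    API_DB = {
        'flavors': {'m1.small': {'vcpus': 1, 'memory_mb': 2048}},  # global data
        'instance_mappings': {'inst-1': 'cell1'},                  # global index
    }

    CELL_DBS = {
        'cell1': {'instances': {'inst-1': {'uuid': 'inst-1',
                                           'flavor': 'm1.small'}}},
    }

    def get_flavor(name):
        # Flavors are global concepts: a single lookup in the API-level database.
        return API_DB['flavors'][name]

    def get_instance(uuid):
        # Instances are cell-local: resolve the owning cell first, then read
        # the record from that cell's database.
        cell = API_DB['instance_mappings'][uuid]
        return CELL_DBS[cell]['instances'][uuid]

    print(get_flavor('m1.small'))
    print(get_instance('inst-1'))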
Global (API-level) Tables ------------------------- instance_types instance_type_projects instance_type_extra_specs quotas project_user_quotas quota_classes quota_usages security_groups security_group_rules security_group_default_rules provider_fw_rules key_pairs migrations networks tags Cell-level Tables ----------------- instances instance_info_caches instance_extra instance_metadata instance_system_metadata instance_faults instance_actions instance_actions_events instance_id_mappings pci_devices block_device_mapping virtual_interfaces nova-13.1.4/doc/source/process.rst0000664000567000056710000011770313064447152020200 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _process: ================= Nova team process ================= Nova is always evolving its processes, but it's important to explain why we have them: so that we can all work to ensure the interactions we need to happen do happen. The process we have should always be there to make good communication between all members of our community easier. OpenStack Wide Patterns ======================= Nova follows most of the generally adopted norms for OpenStack projects. You can get more details here: * http://docs.openstack.org/infra/manual/developers.html * http://git.openstack.org/cgit/openstack/project-team-guide If you are new to Nova, please read this first: :ref:`getting_involved`. Dates overview ============== For Mitaka, please see: https://wiki.openstack.org/wiki/Nova/Mitaka_Release_Schedule For Liberty, please see: https://wiki.openstack.org/wiki/Nova/Liberty_Release_Schedule Feature Freeze ~~~~~~~~~~~~~~ This effort is primarily to help the horizontal teams help prepare their items for release, while at the same time giving developers time to focus on stabilising what is currently in master, and encouraging users and packages to perform tests (automated, and manual) on the release, to spot any major bugs. As such we have the following processes: - https://wiki.openstack.org/wiki/FeatureProposalFreeze - make sure all code is up for review - so we can optimise for completed features, not lots of half completed features - https://wiki.openstack.org/wiki/FeatureFreeze - make sure all feature code is merged - https://wiki.openstack.org/wiki/StringFreeze - give translators time to translate all our strings - Note: debug logs are no longer translated - https://wiki.openstack.org/wiki/DepFreeze - time to co-ordinate the final list of deps, and give packagers time to package them - generally it is also quite destabilising to take upgrades (beyond bug fixes) this late We align with this in Nova and the dates for this release are stated above. As with all processes here, there are exceptions. But the exceptions at this stage need to be discussed with the horizontal teams that might be affected by changes beyond this point, and as such are discussed with one of the OpenStack release managers. Spec and Blueprint Approval Freeze ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a (mostly) Nova specific process. 
Why do we have a Spec Freeze: - specs take a long time to review, keeping it open distracts from code reviews - keeping them "open" and being slow at reviewing the specs (or just ignoring them) really annoys the spec submitters - we generally have more code submitted that we can review, this time bounding is a way to limit the number of submissions By the freeze date, we expect this to also be the complete list of approved blueprints for liberty: https://blueprints.launchpad.net/nova/liberty The date listed above is when we expect all specifications for Liberty to be merged and displayed here: http://specs.openstack.org/openstack/nova-specs/specs/liberty/approved/ New in Liberty, we will keep the backlog open for submission at all times. Note: the focus is on accepting and agreeing problem statements as being in scope, rather than queueing up work items for the next release. We are still working on a new lightweight process to get out of the backlog and approved for a particular release. For more details on backlog specs, please see: http://specs.openstack.org/openstack/nova-specs/specs/backlog/index.html Also new in Liberty, we will allow people to submit Mitaka specs from liberty-2 (rather than liberty-3 as normal). There can be exceptions, usually it's an urgent feature request that comes up after the initial deadline. These will generally be discussed at the weekly Nova meeting, by adding the spec or blueprint to discuss in the appropriate place in the meeting agenda here (ideally make yourself available to discuss the blueprint, or alternatively make your case on the ML before the meeting): https://wiki.openstack.org/wiki/Meetings/Nova Non-priority Feature Freeze ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a Nova specific process. This only applies to low priority blueprints in this list: https://blueprints.launchpad.net/nova/liberty We currently have a very finite amount of review bandwidth. In order to make code review time for the agreed community wide priorities, we have to not do some other things. To this end, we are reserving liberty-3 for priority features and bug fixes. As such, we intend not to merge any non-priority things during liberty-3, so around liberty-2 is the "Feature Freeze" for blueprints that are not a priority for liberty. For liberty, we are not aligning the Non-priority Feature Freeze with the tagging of liberty-2. That means the liberty-2 tag will not include some features that merge later in the week. This means, we only require the code to be approved before the end of July 30th, we don't require it to be merged by that date. This should help stop any gate issues disrupting our ability to merge all the code that we have managed to get reviewed in time. Ideally all code should be merged by the end of July 31st, but the state of the gate will determine how possible that is. You can see the list of priorities for each release: http://specs.openstack.org/openstack/nova-specs/#priorities For things that are very close to merging, it's possible it might get an exception for one week after the freeze date, given the patches get enough +2s from the core team to get the code merged. But we expect this list to be zero, if everything goes to plan (no massive gate failures, etc). 
For details, process see: http://lists.openstack.org/pipermail/openstack-dev/2015-July/070920.html Exception process: - Please add request in here: https://etherpad.openstack.org/p/liberty-nova-non-priority-feature-freeze (ideally with core reviewers to sponsor your patch, normally the folks who have already viewed those patches) - make sure you make your request before the end of Wednesday 5th August - nova-drivers will meet to decide what gets an exception (just like they did last release: http://lists.openstack.org/pipermail/openstack-dev/2015-February/056208.html) - an initial list of exceptions (probably just a PTL compiled list at that point) will be available for discussion during the Nova meeting on Thursday 6th August - the aim is to merge the code for all exceptions by the end of Monday 10th August Alternatives: - It was hoped to make this a continuous process using "slots" to control what gets reviewed, but this was rejected by the community when it was last discussed. There is hope this can be resurrected to avoid the "lumpy" nature of this process. - Currently the runways/kanban ideas are blocked on us adopting something like phabricator that could support such workflows String Freeze ~~~~~~~~~~~~~ NOTE: this is still a provisional idea There are general guidelines here: https://wiki.openstack.org/wiki/StringFreeze But below is an experiment for Nova during liberty, to trial a new process. There are four views onto this process. First, the user point of view: - Would like to see untranslated strings, rather than hiding error/info/warn log messages as debug Second, the translators: - Translators will start translation without string freeze, just after feature freeze. - Then we have a strict string freeze date (around RC1 date) - After at least 10 days to finish up the translations before the final release Third, the docs team: - Config string updates often mean there is a DocImpact and docs need updating - best to avoid those during feature freeze, where possible Fourth, the developer point of view: - Add any translated strings before Feature Freeze - Post Feature Freeze, allow string changes where an untranslated string is better than no string - i.e. allow new log message strings, until the hard freeze - Post Feature Freeze, have a soft string freeze, try not to change existing strings, where possible - Note: moving a string and re-using a existing string is fine, as the tooling deals with that automatically - Post Hard String Freeze, there should be no extra strings to translate - Assume any added strings will not be translated - Send email about the string freeze exception in this case only, but there should be zero of these So, what has changed from https://wiki.openstack.org/wiki/StringFreeze, well: - no need to block new strings until much later in the cycle - should stop the need to rework bug fixes to remove useful log messages - instead, just accept the idea of untranslated strings being better than no strings in those cases So for Liberty, 21st September, so we will call 21st September the hard freeze date, as we expect RC1 to be cut sometime after 21st September. Note the date is fixed, it's not aligned with the cutting of RC1. This means we must cut another tarball (RC2 or higher) at some point after 5th October to include new translations, even if there are no more bug fixes, to give time before the final release on 13th-16th October. How do I get my code merged? 
============================ OK, so you are new to Nova, and you have been given a feature to implement. How do I make that happen? You can get most of your questions answered here: - http://docs.openstack.org/infra/manual/developers.html But let's put a Nova specific twist on things... Overview ~~~~~~~~ .. image:: ./images/Nova_spec_process.svg :alt: Flow chart showing the Nova bug/feature process Where do you track bugs? ~~~~~~~~~~~~~~~~~~~~~~~~ We track bugs here: - http://bugs.launchpad.net/nova If you fix an issue, please raise a bug so others who spot that issue can find the fix you kindly created for them. Also before submitting your patch it's worth checking to see if someone has already fixed it for you (Launchpad helps you with that, at little, when you create the bug report). When do I need a blueprint vs a spec? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For more details see: - http://docs.openstack.org/developer/nova/devref/kilo.blueprints.html#when-is-a-blueprint-needed To understand this question, we need to understand why blueprints and specs are useful. But here is the rough idea: - if it needs a spec, it will need a blueprint. - if it's an API change, it needs a spec. - if it's a single small patch that touches a small amount of code, with limited deployer and doc impact, it probably doesn't need a spec. If you are unsure, please ask johnthetubaguy on IRC, or one of the other nova-drivers. How do I get my blueprint approved? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ So you need your blueprint approved? Here is how: - if you don't need a spec, please add a link to your blueprint to the agenda for the next nova meeting: https://wiki.openstack.org/wiki/Meetings/Nova - be sure your blueprint description has enough context for the review in that meeting. - As of Mitaka, this list is stored in an etherpad: https://etherpad.openstack.org/p/mitaka-nova-spec-review-tracking - if you need a spec, then please submit a nova-spec for review, see: http://docs.openstack.org/infra/manual/developers.html Got any more questions? Contact johnthetubaguy or one of the other nova-specs-core who are awake at the same time as you. IRC is best as you will often get an immediate response, if they are too busy send him/her an email. How do I get a procedural -2 removed from my patch? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When feature freeze hits, any patches for blueprints that are still in review get a procedural -2 to stop them merging. In Nova a blueprint is only approved for a single release. To have the -2 removed, you need to get the blueprint approved for the current release (see `How do I get my blueprint approved?`_). Why are the reviewers being mean to me? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Code reviews take intense concentration and a lot of time. This tends to lead to terse responses with very little preamble or nicety. That said, there's no excuse for being actively rude or mean. OpenStack has a Code of Conduct (https://www.openstack.org/legal/community-code-of-conduct/) and if you feel this has been breached please raise the matter privately. Either with the relevant parties, the PTL or failing those, the OpenStack Foundation. That said, there are many objective reasons for applying a -1 or -2 to a patch: - Firstly and simply, patches must address their intended purpose successfully. - Patches must not have negative side-effects like wiping the database or causing a functional regression. 
Usually removing anything, however tiny, requires a deprecation warning be issued for a cycle. - Code must be maintainable, that is it must adhere to coding standards and be as readable as possible for an average OpenStack developer (acknowledging this person is ill-defined). - Patches must respect the direction of the project, for example they should not make approved specs substantially more difficult to implement. - Release coordinators need the correct process to be followed so scope can be tracked accurately. Bug fixes require bugs, features require blueprints and all but the simplest features require specs. If there is a blueprint, it must be approved for the release/milestone the patch is attempting to merge into. Please particularly bear in mind that a -2 does not mean "never ever" nor does it mean "your idea is bad and you are dumb". It simply means "do not merge today". You may need to wait some time, rethink your approach or even revisit the problem definition but there is almost always some way forward. The core who applied the -2 should tell you what you need to do. My code review seems stuck, what can I do? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ First and foremost - address any -1s and -2s! The review load on Nova is high enough that patches with negative reviews often get filtered out entirely. A few tips: - Be precise. Ensure you're not talking at cross purposes. - Try to understand where the reviewer is coming from. They may have a very different perspective and/or use-case to you. - If you don't understand the problem, ask them to explain - this is common and helpful behaviour. - Be positive. Everyone's patches have issues, including core reviewers. No-one cares once the issues are fixed. - Try not to flip-flop. When two reviewers are pulling you in different directions, stop pushing code and negotiate the best way forward. - If the reviewer does not respond to replies left on the patchset, reach out to them on IRC or email. If they still don't respond, you can try to ask their colleagues if they're on holiday (or simply wait). Finally, you can ask for mediation in the Nova meeting by adding it to the agenda (https://wiki.openstack.org/wiki/Meetings/Nova). This is also what you should do if you are unable to negotiate a resolution to an issue. Secondly, Nova is a big project, be aware of the average wait times: http://russellbryant.net/openstack-stats/nova-openreviews.html Eventually you should get some +1s from people working through the review queue. Expect to get -1s as well. You can ask for reviews within your company, 1-2 are useful (not more), especially if those reviewers are known to give good reviews. You can spend some time while you wait reviewing other people's code - they may reciprocate and you may learn something (:ref:`Why do code reviews when I'm not core? `). If you've waited an appropriate amount of time and you haven't had any +1s, you can ask on IRC for reviews. Please don't ask for core review straight away, especially not directly (IRC or email). Core reviewer time is very valuable and gaining some +1s is a good way to show your patch meets basic quality standards. Once you have a few +1s, be patient. Remember the average wait times. You can ask for reviews each week in IRC, it helps to ask when cores are awake. Bugs ^^^^ It helps to apply correct tracking information. - Put "Closes-Bug", "Partial-Bug" or "Related-Bug" in the commit message tags as necessary. 
- If you have to raise a bug in Launchpad first, do it - this helps someone else find your fix. - Make sure the bug has the correct priority and tag set: https://wiki.openstack.org/wiki/Nova/BugTriage#Step_2:_Triage_Tagged_Bugs - If it's a trivial fix (<100 lines as a rule of thumb), add it to: https://etherpad.openstack.org/p/liberty-nova-priorities-tracking Features ^^^^^^^^ Again, it helps to apply correct tracking information. For blueprint-only features: - Put your blueprint in the commit message, EG "blueprint simple-feature". - Mark the blueprint as NeedsCodeReview if you are finished. - Maintain the whiteboard on the blueprint so it's easy to understand which patches need reviews. - Use a single topic for all related patches. All patches for one blueprint should share a topic. For blueprint and spec features, do everything for blueprint-only features and also: - If it's a project or subteam priority, add it to: https://etherpad.openstack.org/p/liberty-nova-priorities-tracking - Ensure your spec is approved for the current release cycle. If your code is a project or subteam priority, the cores interested in that priority might not mind a ping after it has sat with +1s for a week. If you abuse this privilege, you'll lose respect. If it's not a priority, your blueprint/spec has been approved for the cycle and you have been patient, you can raise it during the Nova meeting. The outcome may be that your spec gets unapproved for the cycle, so that priority items can take focus. If this happens to you, sorry - it should not have been approved in the first place, Nova team bit off more than they could chew, it is their mistake not yours. You can re-propose it for the next cycle. If it's not a priority and your spec has not been approved, your code will not merge this cycle. Please re-propose your spec for the next cycle. Nova Process Mission ==================== This section takes a high level look at the guiding principles behind the Nova process. Open ~~~~ Our mission is to have: - Open Source - Open Design - Open Development - Open Community We have to work out how to keep communication open in all areas. We need to be welcoming and mentor new people, and make it easy for them to pickup the knowledge they need to get involved with OpenStack. For more info on Open, please see: https://wiki.openstack.org/wiki/Open Interoperable API, supporting a vibrant ecosystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An interoperable API that gives users on-demand access to compute resources is at the heart of Nova's mission: http://docs.openstack.org/developer/nova/project_scope.html#mission Nova has a vibrant ecosystem of tools built on top of the current Nova API. All features should be designed to work with all technology combinations, so the feature can be adopted by our ecosystem. If a new feature is not adopted by the ecosystem, it will make it hard for your users to make use of those features, defeating most of the reason to add the feature in the first place. The microversion system allows users to isolate themselves This is a very different aim to being "pluggable" or wanting to expose all capabilities to end users. At the same time, it is not just a "lowest common denominator" set of APIs. It should be discoverable which features are available, and while no implementation details should leak to the end users, purely admin concepts may need to understand technology specific details that back the interoperable and more abstract concepts that are exposed to the end user. 
This is a hard goal, and one area we currently don't do well is isolating image creators from these technology specific details. Smooth Upgrades ~~~~~~~~~~~~~~~ As part of our mission for a vibrant ecosystem around our APIs, we want to make it easy for those deploying Nova to upgrade with minimal impact to their users. Here is the scope of Nova's upgrade support: - upgrade from any commit, to any future commit, within the same major release - only support upgrades between N and N+1 major versions, to reduce technical debt relating to upgrades Here are some of the things we require developers to do, to help with upgrades: - when replacing an existing feature or configuration option, make it clear how to transition to any replacement - deprecate configuration options and features before removing them - i.e. continue to support and test features for at least one release before they are removed - this gives time for operator feedback on any removals - End User API will always be kept backwards compatible Interaction goals ~~~~~~~~~~~~~~~~~ When thinking about the importance of process, we should take a look at: http://agilemanifesto.org With that in mind, let's look at how we want different members of the community to interact. Let's start with looking at issues we have tried to resolve in the past (currently in no particular order). We must: - have a way for everyone to review blueprints and designs, including allowing for input from operators and all types of users (keep it open) - take care to not expand Nova's scope any more than absolutely necessary - ensure we get sufficient focus on the core of Nova so that we can maintain or improve the stability and flexibility of the overall codebase - support any API we release approximately for ever. We currently release every commit, so we're motivate to get the API right first time - avoid low priority blueprints slowing work on high priority work, without blocking those forever - focus on a consistent experience for our users, rather than ease of development - optimise for completed blueprints, rather than more half completed blueprints, so we get maximum value for our users out of our review bandwidth - focus efforts on a subset of patches to allow our core reviewers to be more productive - set realistic expectations on what can be reviewed in a particular cycle, to avoid sitting in an expensive rebase loop - be aware of users that do not work on the project full time - be aware of users that are only able to work on the project at certain times that may not align with the overall community cadence - discuss designs for non-trivial work before implementing it, to avoid the expense of late-breaking design issues FAQs ==== Why bother with all this process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are a large community, spread across multiple timezones, working with several horizontal teams. Good communication is a challenge and the processes we have are mostly there to try and help fix some communication challenges. If you have a problem with a process, please engage with the community, discover the reasons behind our current process, and help fix the issues you are experiencing. Why don't you remove old process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We do! For example, in Liberty we stopped trying to predict the milestones when a feature will land. As we evolve, it is important to unlearn new habits and explore if things get better if we choose to optimise for a different set of issues. Why are specs useful? 
~~~~~~~~~~~~~~~~~~~~~ Spec reviews allow anyone to step up and contribute to reviews, just like with code. Before we used gerrit, it was a very messy review process, that felt very "closed" to most people involved in that process. As Nova has grown in size, it can be hard to work out how to modify Nova to meet your needs. Specs are a great way of having that discussion with the wider Nova community. For Nova to be a success, we need to ensure we don't break our existing users. The spec template helps focus the mind on the impact your change might have on existing users and gives an opportunity to discuss the best way to deal with those issues. However, there are some pitfalls with the process. Here are some top tips to avoid them: - keep it simple. Shorter, simpler, more decomposed specs are quicker to review and merge much quicker (just like code patches). - specs can help with documentation but they are only intended to document the design discussion rather than document the final code. - don't add details that are best reviewed in code, it's better to leave those things for the code review. If we have specs, why still have blueprints? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use specs to record the design agreement, we use blueprints to track progress on the implementation of the spec. Currently, in Nova, specs are only approved for one release, and must be re-submitted for each release you want to merge the spec, although that is currently under review. Why do we have priorities? ~~~~~~~~~~~~~~~~~~~~~~~~~~ To be clear, there is no "nova dev team manager", we are an open team of professional software developers, that all work for a variety of (mostly competing) companies that collaborate to ensure the Nova project is a success. Over time, a lot of technical debt has accumulated, because there was a lack of collective ownership to solve those cross-cutting concerns. Before the Kilo release, it was noted that progress felt much slower, because we were unable to get appropriate attention on the architectural evolution of Nova. This was important, partly for major concerns like upgrades and stability. We agreed it's something we all care about and it needs to be given priority to ensure that these things get fixed. Since Kilo, priorities have been discussed at the summit. This turns in to a spec review which eventually means we get a list of priorities here: http://specs.openstack.org/openstack/nova-specs/#priorities Allocating our finite review bandwidth to these efforts means we have to limit the reviews we do on non-priority items. This is mostly why we now have the non-priority Feature Freeze. For more on this, see below. Blocking a priority effort is one of the few widely acceptable reasons to block someone adding a feature. One of the great advantages of being more explicit about that relationship is that people can step up to help review and/or implement the work that is needed to unblock the feature they want to get landed. This is a key part of being an Open community. Why is there a Feature Freeze (and String Freeze) in Nova? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The main reason Nova has a feature freeze is that it gives people working on docs and translations to sync up with the latest code. Traditionally this happens at the same time across multiple projects, so the docs are synced between what used to be called the "integrated release". 
We also use this time period as an excuse to focus our development efforts on bug fixes, ideally lower risk bug fixes, and improving test coverage. In theory, with a waterfall hat on, this would be a time for testing and stabilisation of the product. In Nova we have a much stronger focus on keeping every commit stable, by making use of extensive continuous testing. In reality, we frequently see the biggest influx of fixes in the few weeks after the release, as distributions do final testing of the released code. It is hoped that the work on Feature Classification will lead us to better understand the levels of testing of different Nova features, so we will be able to reduce and dependency between Feature Freeze and regression testing. It is also likely that the move away from "integrated" releases will help find a more developer friendly approach to keep the docs and translations in sync. Why is there a non-priority Feature Freeze in Nova? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have already discussed why we have priority features. The rate at which code can be merged to Nova is primarily constrained by the amount of time able to be spent reviewing code. Given this, earmarking review time for priority items means depriving it from non-priority items. The simplest way to make space for the priority features is to stop reviewing and merging non-priority features for a whole milestone. The idea being developers should focus on bug fixes and priority features during that milestone, rather than working on non-priority features. A known limitation of this approach is developer frustration. Many developers are not being given permission to review code, work on bug fixes or work on priority features, and so feel very unproductive upstream. An alternative approach of "slots" or "runways" has been considered, that uses a kanban style approach to regulate the influx of work onto the review queue. We are yet to get agreement on a more balanced approach, so the existing system is being continued to ensure priority items are more likely to get the attention they require. Why do you still use Launchpad? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are actively looking for an alternative to Launchpad's bugs and blueprints. Originally the idea was to create Storyboard. However the development has stalled. A more likely front runner is this: http://phabricator.org/applications/projects/ When should I submit my spec? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ideally we want to get all specs for a release merged before the summit. For things that we can't get agreement on, we can then discuss those at the summit. There will always be ideas that come up at the summit and need to be finalised after the summit. This causes a rush which is best avoided. How can I get my code merged faster? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ So no-one is coming to review your code, how do you speed up that process? Firstly, make sure you are following the above process. If it's a feature, make sure you have an approved blueprint. If it's a bug, make sure it is triaged, has its priority set correctly, it has the correct bug tag and is marked as in progress. If the blueprint has all the code up for review, change it from Started into NeedsCodeReview so people know only reviews are blocking you, make sure it hasn't accidentally got marked as implemented. 
Secondly, if you have a negative review (-1 or -2) and you responded to that in a comment or uploading a new change with some updates, but that reviewer hasn't come back for over a week, it's probably a good time to reach out to the reviewer on IRC (or via email) to see if they could look again now you have addressed their comments. If you can't get agreement, and your review gets stuck (i.e. requires mediation), you can raise your patch during the Nova meeting and we will try to resolve any disagreement. Thirdly, is it in merge conflict with master or are any of the CI tests failing? Particularly any third-party CI tests that are relevant to the code you are changing. If you're fixing something that only occasionally failed before, maybe recheck a few times to prove the tests stay passing. Without green tests, reviews tend to move on and look at the other patches that have the tests passing. OK, so you have followed all the process (i.e. your patches are getting advertised via the project's tracking mechanisms), and your patches either have no reviews, or only positive reviews. Now what? Have you considered reviewing other people's patches? Firstly, participating in the review process is the best way for you to understand what reviewers are wanting to see in the code you are submitting. As you get more practiced at reviewing it will help you to write "merge-ready" code. Secondly, if you help review other peoples code and help get their patches ready for the core reviewers to add a +2, it will free up a lot of non-core and core reviewer time, so they are more likely to get time to review your code. For more details, please see: :ref:`Why do code reviews when I'm not core? ` Please note, I am not recommending you go to ask people on IRC or via email for reviews. Please try to get your code reviewed using the above process first. In many cases multiple direct pings generate frustration on both sides and that tends to be counter productive. Now you have got your code merged, lets make sure you don't need to fix this bug again. The fact the bug exists means there is a gap in our testing. Your patch should have included some good unit tests to stop the bug coming back. But don't stop there, maybe its time to add tempest tests, to make sure your use case keeps working? Maybe you need to set up a third party CI so your combination of drivers will keep working? Getting that extra testing in place should stop a whole heap of bugs, again giving reviewers more time to get to the issues or features you want to add in the future. Process Evolution Ideas ======================= We are always evolving our process as we try to improve and adapt to the changing shape of the community. Here we discuss some of the ideas, along with their pros and cons. Splitting out the virt drivers (or other bits of code) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, Nova doesn't have strong enough interfaces to split out the virt drivers, scheduler or REST API. This is seen as the key blocker. Let's look at both sides of the debate here. 
Reasons for the split: - can have separate core teams for each repo - this leads to quicker turn around times, largely due to focused teams - splitting out things from core means less knowledge required to become core in a specific area Reasons against the split: - loss of interoperability between drivers - this is a core part of Nova's mission, to have a single API across all deployments, and a strong ecosystem of tools and apps built on that - we can overcome some of this with stronger interfaces and functional tests - new features often need changes in the API and virt driver anyway - the new "depends-on" can make these cross-repo dependencies easier - loss of code style consistency across the code base - fear of fragmenting the nova community, leaving few to work on the core of the project - could work in subteams within the main tree TODO - need to complete analysis Subteam recommendation as a +2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are groups of people with great knowledge of particular bits of the code base. It may be a good idea to give their recommendation of a merge. In addition, having the subteam focus review efforts on a subset of patches should help concentrate the nova-core reviews they get, and increase the velocity of getting code merged. The first part is for subgroups to show they can do a great job of recommending patches. This is starting in here: https://etherpad.openstack.org/p/liberty-nova-priorities-tracking Ideally this would be done with gerrit user "tags" rather than an etherpad. There are some investigations by sdague in how feasible it would be to add tags to gerrit. Stop having to submit a spec for each release ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned above, we use blueprints for tracking, and specs to record design decisions. Targeting specs to a specific release is a heavyweight solution and blurs the lines between specs and blueprints. At the same time, we don't want to lose the opportunity to revise existing blueprints. Maybe there is a better balance? What about this kind of process: - backlog has these folders: - backlog/incomplete - merge a partial spec - backlog/complete - merge complete specs (remove tracking details, such as assignee part of the template) - ?? backlog/expired - specs are moved here from incomplete or complete when no longer seem to be given attention (after 1 year, by default) - /implemented - when a spec is complete it gets moved into the release directory and possibly updated to reflect what actually happened - there will no longer be a per-release approved spec list To get your blueprint approved: - add it to the next nova meeting - if a spec is required, update the URL to point to the spec merged in a spec to the blueprint - ensure there is an assignee in the blueprint - a day before the meeting, a note is sent to the ML to review the list before the meeting - discuss any final objections in the nova-meeting - this may result in a request to refine the spec, if things have changed since it was merged - trivial cases can be approved in advance by a nova-driver, so not all folks need to go through the meeting This still needs more thought, but should decouple the spec review from the release process. It is also more compatible with a runway style system, that might be less focused on milestones. Runways ~~~~~~~ Runways are a form of Kanban, where we look at optimising the flow through the system, by ensure we focus our efforts on reviewing a specific subset of patches. 
The idea goes something like this: - define some states, such as: design backlog, design review, code backlog, code review, test+doc backlog, complete - blueprints must be in one of the above state - large or high priority bugs may also occupy a code review slot - core reviewer member moves item between the slots - must not violate the rules on the number of items in each state - states have a limited number of slots, to ensure focus - certain percentage of slots are dedicated to priorities, depending on point in the cycle, and the type of the cycle, etc Reasons for: - more focused review effort, get more things merged more quickly - more upfront about when your code is likely to get reviewed - smooth out current "lumpy" non-priority feature freeze system Reasons against: - feels like more process overhead - control is too centralised Replacing Milestones with SemVer Releases ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can deploy any commit of Nova and upgrade to a later commit in that same release. Making our milestones versioned more like an official release would help signal to our users that people can use the milestones in production, and get a level of upgrade support. It could go something like this: - 14.0.0 is milestone 1 - 14.0.1 is milestone 2 (maybe, because we add features, it should be 14.1.0?) - 14.0.2 is milestone 3 - we might do other releases (once a critical bug is fixed?), as it makes sense, but we will always be the time bound ones - 14.0.3 two weeks after milestone 3, adds only bug fixes (and updates to RPC versions?) - maybe a stable branch is created at this point? - 14.1.0 adds updated translations and co-ordinated docs - this is released from the stable branch? - 15.0.0 is the next milestone, in the following cycle - not the bump of the major version to signal an upgrade incompatibility with 13.x We are currently watching Ironic to see how their use of semver goes, and see what lessons need to be learnt before we look to maybe apply this technique during M. Feature Classification ~~~~~~~~~~~~~~~~~~~~~~ This is a look at moving forward this effort: - http://docs.openstack.org/developer/nova/support-matrix.html The things we need to cover: - note what is tested, and how often that test passes (via 3rd party CI, or otherwise) - link to current test results for stable and master (time since last pass, recent pass rate, etc) - TODO - sync with jogo on his third party CI audit and getting trends, ask infra - include experimental features (untested feature) - get better at the impact of volume drivers and network drivers on available features (not just hypervisor drivers) Main benefits: - users get a clear picture of what is known to work - be clear about when experimental features are removed, if no tests are added - allows a way to add experimental things into Nova, and track either their removal or maturation * https://wiki.openstack.org/wiki/Nova/Mitaka_Release_Schedule nova-13.1.4/doc/source/vmstates.rst0000664000567000056710000001211613064447140020355 0ustar jenkinsjenkins00000000000000Virtual Machine States and Transitions ======================================= The following diagrams and tables show the required virtual machine (VM) states and task states for various commands issued by the user. Allowed State Transitions -------------------------- .. 
graphviz:: digraph states { graph [pad=".35", ranksep="0.65", nodesep="0.55", concentrate=true]; node [fontsize=10 fontname="Monospace"]; edge [arrowhead="normal", arrowsize="0.8"]; label="All states are allowed to transition to DELETED and ERROR."; forcelabels=true; labelloc=bottom; labeljust=left; /* states */ building [label="BUILDING"] active [label="ACTIVE"] paused [label="PAUSED"] suspended [label="SUSPENDED"] stopped [label="STOPPED"] rescued [label="RESCUED"] resized [label="RESIZED"] soft_deleted [label="SOFT_DELETED"] shelved [label="SHELVED"] shelved_offloaded [label="SHELVED_OFFLOADED"] deleted [label="DELETED", color="red"] error [label="ERROR", color="red"] /* transitions [action] */ building -> active active -> active [headport=nw, tailport=ne] // manual layout active -> soft_deleted [tailport=e] // prevent arrowhead overlap active -> suspended active -> paused [tailport=w] // prevent arrowhead overlap active -> stopped active -> shelved active -> shelved_offloaded active -> rescued active -> resized soft_deleted -> active [headport=e] // prevent arrowhead overlap suspended -> active suspended -> shelved suspended -> shelved_offloaded paused -> active paused -> shelved paused -> shelved_offloaded stopped -> active stopped -> stopped [headport=nw, tailport=ne] // manual layout stopped -> resized stopped -> rescued stopped -> shelved stopped -> shelved_offloaded resized -> active rescued -> active shelved -> shelved_offloaded shelved -> active shelved_offloaded -> active } Requirements for Commands ------------------------- ================== ================== ==================== ================ Command Req'd VM States Req'd Task States Target State ================== ================== ==================== ================ pause Active, Shutoff, Resize Verify, unset Paused Rescued unpause Paused N/A Active suspend Active, Shutoff N/A Suspended resume Suspended N/A Active rescue Active, Shutoff Resize Verify, unset Rescued unrescue Rescued N/A Active set admin password Active N/A Active rebuild Active, Shutoff Resize Verify, unset Active force delete Soft Deleted N/A Deleted restore Soft Deleted N/A Active soft delete Active, Shutoff, N/A Soft Deleted Error delete Active, Shutoff, N/A Deleted Building, Rescued, Error backup Active, Shutoff N/A Active, Shutoff snapshot Active, Shutoff N/A Active, Shutoff start Shutoff, Stopped N/A Active stop Active, Shutoff, Resize Verify, unset Stopped Rescued reboot Active, Shutoff, Resize Verify, unset Active Rescued resize Active, Shutoff Resize Verify, unset Resized revert resize Active, Shutoff Resize Verify, unset Active confirm resize Active, Shutoff Resize Verify, unset Active ================== ================== ==================== ================ VM states and Possible Commands ------------------------------- ============ ================================================================= VM State Commands ============ ================================================================= Paused unpause Suspended resume Active set admin password, suspend, pause, rescue, rebuild, soft delete, delete, backup, snapshot, stop, reboot, resize, revert resize, confirm resize Shutoff suspend, pause, rescue, rebuild, soft delete, delete, backup, start, snapshot, stop, reboot, resize, revert resize, confirm resize Rescued unrescue, pause Stopped rescue, delete, start Soft Deleted force delete, restore Error soft delete, delete Building delete Rescued delete, stop, reboot ============ 
================================================================= Create Instance States ---------------------- The following diagram shows the sequence of VM states, task states, and power states when a new VM instance is created. .. image:: ./images/create_vm_states.svg :alt: Sequence of VM states, task states, and power states when a new VM instance is created. nova-13.1.4/doc/source/i18n.rst0000664000567000056710000000342513064447152017274 0ustar jenkinsjenkins00000000000000Internationalization ==================== Nova uses the `oslo.i18n library `_ to support internationalization. The oslo.i18n library is built on top of `gettext `_ and provides functions that are used to enable user-facing strings such as log messages to appear in the appropriate language in different locales. Nova exposes the oslo.i18n library support via the ``nova/i18n.py`` integration module. This module provides the functions needed to wrap translatable strings. It provides the ``_()`` wrapper for general user-facing messages and specific wrappers for messages used only for logging. DEBUG level messages do not need translation but CRITICAL, ERROR, WARNING and INFO messages should be wrapped with ``_LC()``, ``_LE()``, ``_LW()`` or ``_LI()`` respectively. For example:: LOG.debug("block_device_mapping %(mapping)s", {'mapping': block_device_mapping}) or:: LOG.warn(_LW('Unknown base file %(img)s'), {'img': img}) You should use the basic wrapper ``_()`` for strings which are not log messages:: raise nova.SomeException(_('Invalid service catalogue')) Do not use ``locals()`` for formatting messages because: 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. 4. It creates a lot of otherwise unused variables. If you do not follow the project conventions, your code may cause hacking checks to fail. The ``_()``, ``_LC()``, ``_LE()``, ``_LW()`` and ``_LI()`` functions can be imported with:: from nova.i18n import _ from nova.i18n import _LC from nova.i18n import _LE from nova.i18n import _LW from nova.i18n import _LI nova-13.1.4/doc/source/api_microversion_history.rst0000664000567000056710000000010313064447152023634 0ustar jenkinsjenkins00000000000000.. include:: ../../nova/api/openstack/rest_api_version_history.rst nova-13.1.4/doc/source/gmr.rst0000664000567000056710000000565013064447152017304 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2014 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Nova contains a mechanism whereby developers and system administrators can generate a report about the state of a running Nova executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Nova process with support (see below). The *GMR* will then be outputted standard error for that particular process. 
For example, suppose that ``nova-api`` has process id ``8675``, and was run with ``2>/var/log/nova/nova-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/nova/nova-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (currently residing in oslo-incubator), as well as the Nova version module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from nova import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation under :mod:`oslo.reports` nova-13.1.4/doc/source/index.rst0000664000567000056710000001560213064447152017624 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2012 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================================== Welcome to Nova's developer documentation! ========================================== Nova is an OpenStack project designed to provide power massively scalable, on demand, self service access to compute resources. The developer documentation provided here is continually kept up-to-date based on the latest code, and may not represent the state of the project at any specific prior release. .. note:: This is documentation for developers, if you are looking for more general documentation including API, install, operator and user guides see `docs.openstack.org`_ .. _`docs.openstack.org`: http://docs.openstack.org This documentation is intended to help explain what the Nova developers think is the current scope of the Nova project, as well as the architectural decisions we have made in order to support that scope. We also document our plans for evolving our architecture over time. 
Finally, we documented our current development process and policies. Compute API References ====================== Nova has had a v2 API for a long time. We are currently in the process of moving to a new implementation of that API, which we have called v2.1. v2.1 started life as an API called v3, but that name should never be used any more. We are currently in the process of transitioning users over to the v2.1 implementation, at which point the v2 code will be deleted. * `v2.1 (CURRENT)`_ * `v2 (SUPPORTED)`_ and `v2 extensions (SUPPORTED)`_ (Will be deprecated in the near future.) Changes to the Compute API post v2.1 are made using microversions. You can see a history of our microversions here: .. toctree:: :maxdepth: 1 api_microversion_history We also publish end-user API docs as an API Guide. * `Compute API Guide`_ .. _`v2.1 (CURRENT)`: http://developer.openstack.org/api-ref-compute-v2.1.html .. _`v2 (SUPPORTED)`: http://developer.openstack.org/api-ref-compute-v2.html .. _`v2 extensions (SUPPORTED)`: http://developer.openstack.org/api-ref-compute-v2-ext.html .. _`Compute API Guide`: http://developer.openstack.org/api-guide/compute/ There was a session on the v2.1 API at the Liberty summit which you can watch `here `_. Feature Status ============== Nova aims to have a single compute API that works the same across all deployments of Nova. While many features are well-tested, well-documented, support live upgrade, and are ready for production, some are not. Also the choice of underlying technology affects the list of features that are ready for production. Our first attempt to communicate this is the feature support matrix (previously called the hypervisor support matrix). Over time we hope to evolve that to include a classification of each feature's maturity and exactly what technology combinations are covered by current integration testing efforts. .. toctree:: :maxdepth: 1 test_strategy feature_classification support-matrix Developer Guide =============== If you are new to Nova, this should help you start to understand what Nova actually does, and why. .. toctree:: :maxdepth: 1 how_to_get_involved process architecture project_scope development.environment Development Policies -------------------- The Nova community is a large community. We have lots of users, and they all have a lot of expectations around upgrade and backwards compatibility. For example, having a good stable API, with discoverable versions and capabilities is important for maintaining the strong ecosystem around Nova. Our process is always evolving, just as Nova and the community around Nova evolves over time. If there are things that seem strange, or you have ideas on how to improve things, please engage in that debate, so we continue to improve how the Nova community operates. This section looks at the processes and why. The main aim behind all the process is to aid good communication between all members of the Nova community, while keeping users happy and keeping developers productive. .. toctree:: :maxdepth: 1 process blueprints policies Architecture Concepts ---------------------- This follows on for the discussion in the introduction, and digs into details on specific parts of the Nova architecture. We find it important to document the reasons behind our architectural decisions, so its easier for people to engage in the debates about the future of Nova's architecture. This is all part of Open Design and Open Development. .. 
toctree:: :maxdepth: 1 aggregates threading vmstates i18n filter_scheduler rpc block_device_mapping addmethod.openstackapi conductor notifications Architecture Evolution Plans ----------------------------- The following section includes documents that describe the overall plan behind groups of nova-specs. Most of these cover items relating to the evolution of various parts of Nova's architecture. Once the work is complete, these documents will move into the "Concepts" section. If you want to get involved in shaping the future of Nova's architecture, these are a great place to start reading up on the current plans. .. toctree:: :maxdepth: 1 cells upgrade api_plugins api_microversion_dev policy_enforcement stable_api code-review scheduler_evolution Advanced testing and guides ---------------------------- .. toctree:: :maxdepth: 1 gmr testing/libvirt-numa testing/serial-console Sample Configuration File ------------------------- .. toctree:: :maxdepth: 1 sample_config Man Pages ---------- .. toctree:: :maxdepth: 1 man/index Module Reference ---------------- .. toctree:: :maxdepth: 1 services .. # NOTE(mriedem): This is the section where we hide things that we don't # actually want in the table of contents but sphinx build would fail if # they aren't in the toctree somewhere. For example, we hide api/autoindex # since that's already covered with modindex below. .. toctree:: :hidden: api/autoindex Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` nova-13.1.4/doc/source/support-matrix.rst0000664000567000056710000000361413064447140021530 0ustar jenkinsjenkins00000000000000 Feature Support Matrix ====================== .. warning:: Please note, while this document is still being maintained, this is slowly being updated to re-group and classify features using the definitions described in here: :doc:`feature_classification` When considering which capabilities should be marked as mandatory the following general guiding principles were applied * **Inclusivity** - people have shown ability to make effective use of a wide range of virtualization technologies with broadly varying featuresets. Aiming to keep the requirements as inclusive as possible, avoids second-guessing what a user may wish to use the cloud compute service for. * **Bootstrapping** - a practical use case test is to consider that starting point for the compute deploy is an empty data center with new machines and network connectivity. The look at what are the minimum features required of a compute service, in order to get user instances running and processing work over the network. * **Competition** - an early leader in the cloud compute service space was Amazon EC2. A sanity check for whether a feature should be mandatory is to consider whether it was available in the first public release of EC2. This had quite a narrow featureset, but none the less found very high usage in many use cases. So it serves to illustrate that many features need not be considered mandatory in order to get useful work done. * **Reality** - there are many virt drivers currently shipped with Nova, each with their own supported feature set. Any feature which is missing in at least one virt driver that is already in-tree, must by inference be considered optional until all in-tree drivers support it. This does not rule out the possibility of a currently optional feature becoming mandatory at a later date, based on other principles above. .. 
support_matrix:: support-matrix.ini nova-13.1.4/doc/source/blueprints.rst0000664000567000056710000000473013064447152020704 0ustar jenkinsjenkins00000000000000================================== Blueprints, Specs and Priorities ================================== Like most OpenStack projects, Nova uses `blueprints`_ and specifications (specs) to track new features, but not all blueprints require a spec. This document covers when a spec is needed. .. note:: Nova's specs live at: `specs.openstack.org`_ .. _`blueprints`: http://docs.openstack.org/infra/manual/developers.html#working-on-specifications-and-blueprints .. _`specs.openstack.org`: http://specs.openstack.org/openstack/nova-specs/ Specs ===== A spec is needed for any feature that requires a design discussion. All features need a blueprint but not all blueprints require a spec. If a new feature is straightforward enough that it doesn't need any design discussion, then no spec is required. In order to provide the sort of documentation that would otherwise be provided via a spec, the commit message should include a ``DocImpact`` flag and a thorough description of the feature from a user/operator perspective. Guidelines for when a feature doesn't need a spec. * Is the feature a single self contained change? * If the feature touches code all over the place, it probably should have a design discussion. * If the feature is big enough that it needs more then one commit, it probably should have a design discussion. * Not an API change. * API changes always require a design discussion. Project Priorities =================== * Pick several project priority themes, in the form of use cases, to help us prioritize work * Generate list of improvement blueprints based on the themes * Produce rough draft of list going into summit and finalize the list at the summit * Publish list of project priorities and look for volunteers to work on them * Update spec template to include * Specific use cases * State if the spec is project priority or not * Keep an up to date list of project priority blueprints that need code review in an etherpad. * Consumers of project priority and project priority blueprint lists: * Reviewers looking for direction of where to spend their blueprint review time. If a large subset of nova-core doesn't use the project priorities it means the core team is not aligned properly and should revisit the list of project priorities * The blueprint approval team, to help find the right balance of blueprints * Contributors looking for something to work on * People looking for what they can expect in the next release nova-13.1.4/doc/source/man/0000775000567000056710000000000013064447471016536 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/man/nova-rootwrap.rst0000664000567000056710000000221513064447152022102 0ustar jenkinsjenkins00000000000000============= nova-rootwrap ============= ----------------------- Root wrapper for Nova ----------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-rootwrap [options] DESCRIPTION =========== Filters which commands nova is allowed to run as another user. 
To use this, you should set the following in nova.conf: rootwrap_config=/etc/nova/rootwrap.conf You also need to let the nova user run nova-rootwrap as root in sudoers: nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf * To make allowed commands node-specific, your packaging should only install {compute,network}.filters respectively on compute and network nodes (i.e. nova-api nodes should not have any of those files installed). OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-consoleauth.rst0000664000567000056710000000140413064447152022550 0ustar jenkinsjenkins00000000000000================ nova-consoleauth ================ ------------------------------------------- Nova Console Authentication Server ------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-consoleauth [options] DESCRIPTION =========== Provides Authentication for nova consoles OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-cells.rst0000664000567000056710000000144313064447152021331 0ustar jenkinsjenkins00000000000000========== nova-cells ========== -------------------------------- Server for the Nova Cells -------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-cells [options] DESCRIPTION =========== Starts the nova-cells service. The nova-cells service handles communication between cells and selects cells for new instances. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-compute.rst0000664000567000056710000000162113064447152021701 0ustar jenkinsjenkins00000000000000============ nova-compute ============ --------------------- Nova Compute Server --------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-compute [options] DESCRIPTION =========== Handles all processes relating to instances (guest vms). nova-compute is responsible for building a disk image, launching it via the underlying virtualization driver, responding to calls to check its state, attaching persistent storage, and terminating it. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-dhcpbridge.rst0000664000567000056710000000164613064447152022327 0ustar jenkinsjenkins00000000000000=============== nova-dhcpbridge =============== -------------------------------------------------- Handles Lease Database updates from DHCP servers -------------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-dhcpbridge [options] DESCRIPTION =========== Handles lease database updates from DHCP servers. Used whenever nova is managing DHCP (vlan and flatDHCP). nova-dhcpbridge should not be run as a daemon. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/index.rst0000664000567000056710000000240413064447152020373 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Command-line Utilities ====================== In this section you will find information on Nova's command line utilities. Reference --------- .. 
toctree:: :maxdepth: 1 nova-all nova-api-metadata nova-api-os-compute nova-api nova-cells nova-cert nova-compute nova-conductor nova-console nova-consoleauth nova-dhcpbridge nova-idmapshift nova-manage nova-network nova-novncproxy nova-rootwrap nova-scheduler nova-spicehtml5proxy nova-xvpvncproxy nova-serialproxy nova-13.1.4/doc/source/man/nova-api.rst0000664000567000056710000000147613064447152021006 0ustar jenkinsjenkins00000000000000======== nova-api ======== ------------------------------------------- Server for the Nova EC2 and OpenStack APIs ------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-api [options] DESCRIPTION =========== nova-api is a server daemon that serves the nova EC2 and OpenStack APIs in separate greenthreads OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-api-metadata.rst0000664000567000056710000000145313064447152022557 0ustar jenkinsjenkins00000000000000================= nova-api-metadata ================= --------------------------------- Server for the Nova Metadata API --------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-api-metadata [options] DESCRIPTION =========== nova-api-metadata is a server daemon that serves the Nova Metadata API OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-serialproxy.rst0000664000567000056710000000154513064447152022613 0ustar jenkinsjenkins00000000000000================ nova-serialproxy ================ -------------------------------------------------------- Websocket serial Proxy for OpenStack Nova serial ports. -------------------------------------------------------- :Author: openstack@lists.launchpad.net :Date: 2014-03-15 :Copyright: OpenStack Foundation :Version: 2014.2 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-serialproxy [options] DESCRIPTION =========== Websocket proxy that is compatible with OpenStack Nova serial ports. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova `__ nova-13.1.4/doc/source/man/nova-all.rst0000664000567000056710000000141713064447152021000 0ustar jenkinsjenkins00000000000000========= nova-all ========= ----------------------------- Server for all Nova services ----------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-all [options] DESCRIPTION =========== nova-all is a server daemon that serves all Nova services, each in a separate greenthread OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-conductor.rst0000664000567000056710000000135713064447152022233 0ustar jenkinsjenkins00000000000000============== nova-conductor ============== ----------------------------- Server for the Nova Conductor ----------------------------- :Author: openstack@lists.openstack.org :Date: 2012-11-16 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-conductor [options] DESCRIPTION =========== nova-conductor is a server daemon that serves the Nova Conductor service, which provides coordination and database query support for Nova. OPTIONS ======= **General options** FILES ===== * /etc/nova/nova.conf SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-idmapshift.rst0000664000567000056710000000610113064447152022353 0ustar jenkinsjenkins00000000000000=============== nova-idmapshift =============== ----------------------------------------- Tool used by Nova libvirt-lxc virt driver ----------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-idmapshift [options] path DESCRIPTION =========== nova-idmapshift is a tool that properly sets the ownership of a filesystem for use with linux user namespaces. This tool can only be used with linux lxc containers. When using user namespaces with linux lxc containers, the filesystem of the container must be owned by the targeted user and group ids being applied to that container. Otherwise, processes inside the container won't be able to access the filesystem. For example: nova-idmapshift -i -u 0:10000:2000 -g 0:10000:2000 path This command will idempotently shift `path` to proper ownership using the provided uid and gid mappings. When using the uid map string '0:10000:2000', this means that user ids inside the container between 0 and 1999 will map to user ids on the host between 10000 and 11999. Root (0) becomes 10000, user 1 becomes 10001, user 50 becomes 10050 and user 1999 becomes 11999. This means that files that are owned by root need to actually be owned by user 10000, and files owned by 50 need to be owned by 10050, and so on. 
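The mapping arithmetic itself is simple. The snippet below is only an illustrative sketch of how a ``guest:host:count`` map string translates a guest id to a host id; it is not the tool's actual implementation, and the ``nobody_id`` fallback simply mirrors the default described under OPTIONS::

    def translate_id(guest_id, map_string, nobody_id=65534):
        # map_string looks like '0:10000:2000'
        # (guest start id, host start id, count of ids in the range)
        guest_start, host_start, count = (int(x) for x in map_string.split(':'))
        if guest_start <= guest_id < guest_start + count:
            return host_start + (guest_id - guest_start)
        # Ids outside the mapped range are squashed to the nobody id, so
        # they are inaccessible inside the container.
        return nobody_id

    translate_id(0, '0:10000:2000')     # 10000
    translate_id(50, '0:10000:2000')    # 10050
    translate_id(1999, '0:10000:2000')  # 11999
    translate_id(3000, '0:10000:2000')  # 65534
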
nova-idmapshift will take the uid and gid strings used for user namespaces and properly set up the filesystem for use by those users. Uids and gids outside of provided ranges will be mapped to nobody-id (default is max uid/gid) so that they are inaccessible inside the container. OPTIONS ======= Positional arguments ~~~~~~~~~~~~~~~~~~~~ path Root path of the filesystem to be shifted Optional arguments ~~~~~~~~~~~~~~~~~~ -h, --help Show this help message and exit. -u USER_MAPS, --uid=USER_MAPS User ID mappings, in the form: [[guest-uid:host-uid:count],...] -g GROUP_MAPS, --gid=GROUP_MAPS Group ID mappings, in the form: [[guest-gid:host-gid:count],...] -n nobody-id, --nobody nobody-id ID to map all unmapped uid and gids to. Defaults to 65534. -i, --idempotent Shift operation will only be performed if filesystem appears unshifted. Defaults to false. -c, --confirm Will perform check on the filesystem: Returns 0 when filesystem appears shifted. Returns 1 when filesystem appears unshifted. Defaults to false. -d, --dry-run Print chown operations, but won't perform them. Defaults to false. -v, --verbose Print chown operations while performing them. Defaults to false. SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-manage.rst0000664000567000056710000001057713064447152021467 0ustar jenkinsjenkins00000000000000=========== nova-manage =========== ------------------------------------------------------ control and manage cloud computer instances and images ------------------------------------------------------ :Author: openstack@lists.openstack.org :Date: 2012-04-05 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-manage [] DESCRIPTION =========== nova-manage controls cloud computing instances by managing shell selection, vpn connections, and floating IP address configuration. More information about OpenStack Nova is at http://nova.openstack.org. OPTIONS ======= The standard pattern for executing a nova-manage command is: ``nova-manage []`` Run without arguments to see a list of available command categories: ``nova-manage`` Categories are project, shell, vpn, and floating. Detailed descriptions are below. You can also run with a category argument such as user to see a list of all commands in that category: ``nova-manage floating`` These sections describe the available categories and arguments for nova-manage. Nova Db ~~~~~~~ ``nova-manage db version`` Print the current main database version. ``nova-manage db sync`` Sync the main database up to the most recent version. This is the standard way to create the db as well. ``nova-manage db archive_deleted_rows [--max_rows ] [--verbose]`` Move deleted rows from production tables to shadow tables. Specifying --verbose will print the results of the archive operation for any tables that were changed. ``nova-manage db null_instance_uuid_scan [--delete]`` Lists and optionally deletes database records where instance_uuid is NULL. Nova ApiDb ~~~~~~~~~~ ``nova-manage api_db version`` Print the current cells api database version. ``nova-manage api_db sync`` Sync the api cells database up to the most recent version. This is the standard way to create the db as well. Nova Logs ~~~~~~~~~ ``nova-manage logs errors`` Displays nova errors from log files. ``nova-manage logs syslog `` Displays nova alerts from syslog. Nova Shell ~~~~~~~~~~ ``nova-manage shell bpython`` Starts a new bpython shell. 
``nova-manage shell ipython`` Starts a new ipython shell. ``nova-manage shell python`` Starts a new python shell. ``nova-manage shell run`` Starts a new shell using python. ``nova-manage shell script `` Runs the named script from the specified path with flags set. Nova VPN ~~~~~~~~ ``nova-manage vpn list`` Displays a list of projects, their IP port numbers, and what state they're in. ``nova-manage vpn run `` Starts the VPN for the named project. ``nova-manage vpn spawn`` Runs all VPNs. Nova Floating IPs ~~~~~~~~~~~~~~~~~ ``nova-manage floating create [--pool ] [--interface ]`` Creates floating IP addresses for the given range, optionally specifying a floating pool and a network interface. ``nova-manage floating delete `` Deletes floating IP addresses in the range given. ``nova-manage floating list`` Displays a list of all floating IP addresses. Nova Images ~~~~~~~~~~~ ``nova-manage image image_register `` Registers an image with the image service. ``nova-manage image kernel_register `` Registers a kernel with the image service. ``nova-manage image ramdisk_register `` Registers a ramdisk with the image service. ``nova-manage image all_register `` Registers an image kernel and ramdisk with the image service. ``nova-manage image convert `` Converts all images in directory from the old (Bexar) format to the new format. Nova VM ~~~~~~~~~~~ ``nova-manage vm list [host]`` Show a list of all instances. Accepts optional hostname (to show only instances on specific host). ``nova-manage live-migration `` Live migrate instance from current host to destination host. Requires instance id (which comes from euca-describe-instance) and destination host name (which can be found from nova-manage service list). SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-spicehtml5proxy.rst0000664000567000056710000000154713064447152023413 0ustar jenkinsjenkins00000000000000==================== nova-spicehtml5proxy ==================== -------------------------------------------------------- Websocket Proxy for OpenStack Nova SPICE HTML5 consoles. -------------------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-spicehtml5proxy [options] DESCRIPTION =========== Websocket proxy that is compatible with OpenStack Nova SPICE HTML5 consoles. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-scheduler.rst0000664000567000056710000000127613064447152022211 0ustar jenkinsjenkins00000000000000============== nova-scheduler ============== -------------- Nova Scheduler -------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-scheduler [options] DESCRIPTION =========== Nova Scheduler picks a compute node to run a VM instance. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-api-os-compute.rst0000664000567000056710000000153513064447152023073 0ustar jenkinsjenkins00000000000000==================== nova-api-os-compute ==================== ------------------------------------------- Server for the Nova OpenStack Compute APIs ------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-api-os-compute [options] DESCRIPTION =========== nova-api-os-compute is a server daemon that serves the Nova OpenStack Compute API OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/api-paste.ini * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-console.rst0000664000567000056710000000136613064447152021675 0ustar jenkinsjenkins00000000000000============ nova-console ============ ---------------------------- Nova Console Server ---------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-console [options] DESCRIPTION =========== nova-console is a console Proxy to set up multi-tenant VM console access (i.e. with xvp) OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-network.rst0000664000567000056710000000133413064447152021717 0ustar jenkinsjenkins00000000000000============= nova-network ============= --------------------- Nova Network Server --------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-network [options] DESCRIPTION =========== Nova Network is responsible for allocating IPs and setting up the network OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-novncproxy.rst0000664000567000056710000000151513064447152022454 0ustar jenkinsjenkins00000000000000=============== nova-novncproxy =============== -------------------------------------------------------- Websocket novnc Proxy for OpenStack Nova noVNC consoles. -------------------------------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-novncproxy [options] DESCRIPTION =========== Websocket proxy that is compatible with OpenStack Nova noVNC consoles. 
OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-cert.rst0000664000567000056710000000150613064447152021164 0ustar jenkinsjenkins00000000000000========== nova-cert ========== -------------------------------- Server for the Nova Cert -------------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-cert [options] DESCRIPTION =========== nova-cert is a server daemon that serves the Nova Cert service for X509 certificates. Used to generate certificates for euca-bundle-image. Only needed for EC2 API. OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/man/nova-xvpvncproxy.rst0000664000567000056710000000132613064447152022655 0ustar jenkinsjenkins00000000000000================ nova-xvpvncproxy ================ ----------------------------- XVP VNC Console Proxy Server ----------------------------- :Author: openstack@lists.openstack.org :Date: 2012-09-27 :Copyright: OpenStack Foundation :Version: 2012.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== nova-xvpvncproxy [options] DESCRIPTION =========== XVP VNC Console Proxy Server OPTIONS ======= **General options** FILES ======== * /etc/nova/nova.conf * /etc/nova/policy.json * /etc/nova/rootwrap.conf * /etc/nova/rootwrap.d/ SEE ALSO ======== * `OpenStack Nova `__ BUGS ==== * Nova bugs are managed at Launchpad `Bugs : Nova `__ nova-13.1.4/doc/source/policies.rst0000664000567000056710000001016213064447152020320 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Development policies -------------------- Out Of Tree Support =================== While nova has many entrypoints and other places in the code that allow for wiring in out of tree code, upstream doesn't actively make any guarantees about these extensibility points; we don't support them, make any guarantees about compatibility, stability, etc. Public Contractual APIs ======================== Although nova has many internal APIs, they are not all public contractual APIs. Below is a link of our public contractual APIs: * http://developer.openstack.org/api-ref-compute-v2.1.html Anything not in this list is considered private, not to be used outside of nova, and should not be considered stable. REST APIs ========== Follow the guidelines set in: https://wiki.openstack.org/wiki/APIChangeGuidelines The canonical source for REST API behavior is the code *not* documentation. 
Documentation is manually generated after the code by folks looking at the code and writing up what they think it does, and it is very easy to get this wrong. This policy is in place to prevent us from making backwards incompatible changes to REST APIs. Patches and Reviews =================== Merging a patch requires a non-trivial amount of reviewer resources. As a patch author, you should try to offset the reviewer resources spent on your patch by reviewing other patches. If no one does this, the review team (cores and otherwise) become spread too thin. For review guidelines see: http://docs.openstack.org/infra/manual/developers.html#peer-review Reverts for Retrospective Vetos =============================== Sometimes our simple "2 +2s" approval policy will result in errors. These errors might be a bug that was missed, or equally importantly, it might be that other cores feel that there is a need for more discussion on the implementation of a given piece of code. Rather than `an enforced time-based solution`_ - for example, a patch couldn't be merged until it has been up for review for 3 days - we have chosen an honor-based system where core reviewers would not approve potentially contentious patches until the proposal had been sufficiently socialized and everyone had a chance to raise any concerns. Recognising that mistakes can happen, we also have a policy where contentious patches which were quickly approved should be reverted so that the discussion around the proposal can continue as if the patch had never been merged in the first place. In such a situation, the procedure is: 0. The commit to be reverted must not have been released. 1. The core team member who has a -2 worthy objection should propose a revert, stating the specific concerns that they feel need addressing. 2. Any subsequent patches depending on the to-be-reverted patch may need to be reverted also. 3. Other core team members should quickly approve the revert. No detailed debate should be needed at this point. A -2 vote on a revert is strongly discouraged, because it effectively blocks the right of cores approving the revert from -2 voting on the original patch. 4. The original patch submitter should re-submit the change, with a reference to the original patch and the revert. 5. The original reviewers of the patch should restore their votes and attempt to summarize their previous reasons for their votes. 6. The patch should not be re-approved until the concerns of the people proposing the revert are worked through. A mailing list discussion or design spec might be the best way to achieve this. .. _`an enforced time-based solution`: https://lists.launchpad.net/openstack/msg08574.html nova-13.1.4/doc/source/addmethod.openstackapi.rst0000664000567000056710000000435113064447140023122 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Adding a Method to the OpenStack API ==================================== The interface is a mostly RESTful API. 
REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Routing ------- To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. URLs are mapped to "action" methods on "controller" classes in ``nova/api/openstack/__init__/ApiRouter.__init__`` . See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. - mapper.resource() connects many standard URLs to actions on a controller. Controllers and actions ----------------------- Controllers live in ``nova/api/openstack``, and inherit from nova.wsgi.Controller. See ``nova/api/openstack/compute/servers.py`` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. Serialization ------------- Actions return a dictionary, and wsgi.Controller serializes that to JSON. Faults ------ If you need to return a non-200, you should return faults.Fault(webob.exc.HTTPNotFound()) replacing the exception as appropriate. nova-13.1.4/doc/source/upgrade.rst0000664000567000056710000002501113064447152020137 0ustar jenkinsjenkins00000000000000.. Copyright 2014 Rackspace All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Upgrades ======== Nova aims to provide upgrades with minimal downtime. Firstly, the data plane. There should be no VM downtime when you upgrade Nova. Nova has had this since the early days, with the exception of some nova-network related services. Secondly, we want no downtime during upgrades of the Nova control plane. This document is trying to describe how we can achieve that. Once we have introduced the key concepts relating to upgrade, we will introduce the process needed for a no downtime upgrade of nova. Current Database Upgrade Types ------------------------------ Currently Nova has 2 types of database upgrades that are in use. #. Offline Migrations #. Online Migrations Offline Migrations consist of: '''''''''''''''''''''''''''''' #. Database schema migrations from pre-defined migrations in nova/db/sqlalchemy/migrate_repo/versions. #. *Deprecated* Database data migrations from pre-defined migrations in nova/db/sqlalchemy/migrate_repo/versions. Online Migrations consist of: ''''''''''''''''''''''''''''' #. Online data migrations from inside Nova object source code. #. *Future* Online schema migrations using auto-generation from models. An example of online data migrations are the flavor migrations done as part of Nova object version 1.18. This included a transient migration of flavor storage from one database location to another. 
:emphasis:`Note: Database downgrades are not supported.` Migration policy: ''''''''''''''''' The following guidelines for schema and data migrations are followed in order to ease upgrades: * Additive schema migrations - In general, almost all schema migrations should be additive. Put simply, they should only create elements like columns, indices, and tables. * Subtractive schema migrations - To remove an element like a column or table during the N release cycle: #. The element must be deprecated and retained for backward compatibility. (This allows for graceful upgrade from N to N+1.) #. Data migration, by the objects layer, must completely migrate data from the old version of the schema to the new version. * `Data migration example `_ * `Data migration enforcement example `_ (for sqlalchemy migrate/deprecated scripts): #. The column can then be removed with a migration at the start of N+2. * All schema migrations should be idempotent. (For example, a migration should check if an element exists in the schema before attempting to add it.) This logic comes for free in the autogenerated workflow of the online migrations. * Constraints - When adding a foreign or unique key constraint, the schema migration code needs to handle possible problems with data before applying the constraint. (Example: A unique constraint must clean up duplicate records before applying said constraint.) * Data migrations - As mentioned above, data migrations will be done in an online fashion by custom code in the object layer that handles moving data between the old and new portions of the schema. In addition, for each type of data migration performed, there should exist a nova-manage option for an operator to manually request that rows be migrated. * See `flavor migration spec `_ for an example of data migrations in the object layer. *Future* work - #. Adding plumbing to enforce that relevant data migrations are completed before running `contract` in the expand/migrate/contract schema migration workflow. A potential solution would be for `contract` to run a gating test for each specific subtract operation to determine if the operation can be completed. Concepts -------- Here are the key concepts you need to know before reading the section on the upgrade process: RPC version pinning Through careful RPC versioning, newer nodes are able to talk to older nova-compute nodes. When upgrading control plane nodes, we can pin them at an older version of the compute RPC API, until all the compute nodes are able to be upgraded. https://wiki.openstack.org/wiki/RpcMajorVersionUpdates .. note:: This does not apply to cells deployments since cells does not currently support rolling upgrades. It is assumed that cells deployments are upgraded in lockstep so n-1 cells compatibility does not work. Online Configuration Reload During the upgrade, we pin new serves at the older RPC version. When all services are updated to use newer code, we need to unpin them so we are able to use any new functionality. To avoid having to restart the service, using the current SIGHUP signal handling, or otherwise, ideally we need a way to update the currently running process to use the latest configuration. Graceful service shutdown Many nova services are python processes listening for messages on a AMQP queue, including nova-compute. When sending the process the SIGTERM the process stops getting new work from its queue, completes any outstanding work, then terminates. 
During this process, messages can be left on the queue for when the python process starts back up. This gives us a way to shutdown a service using older code, and start up a service using newer code with minimal impact. If its a service that can have multiple workers, like nova-conductor, you can usually add the new workers before the graceful shutdown of the old workers. In the case of singleton services, like nova-compute, some actions could be delayed during the restart, but ideally no actions should fail due to the restart. NOTE: while this is true for the RabbitMQ RPC backend, we need to confirm what happens for other RPC backends. API load balancer draining When upgrading API nodes, you can make your load balancer only send new connections to the newer API nodes, allowing for a seamless update of your API nodes. Expand/Contract DB Migrations Modern databases are able to make many schema changes while you are still writing to the database. Taking this a step further, we can make all DB changes by first adding the new structures, expanding. Then you can slowly move all the data into a new location and format. Once that is complete, you can drop bits of the scheme that are no long needed, i.e. contract. We have plans to implement this here: https://review.openstack.org/#/c/102545/5/specs/juno/online-schema-changes.rst,cm Online Data Migrations using objects In Kilo we are moving all data migration into the DB objects code. When trying to migrate data in the database from the old format to the new format, this is done in the object code when reading or saving things that are in the old format. For records that are not updated, you need to run a background process to convert those records into the newer format. This process must be completed before you contract the database schema. We have the first example of this happening here: http://specs.openstack.org/openstack/nova-specs/specs/kilo/approved/flavor-from-sysmeta-to-blob.html DB prune deleted rows Currently resources are soft deleted in the database, so users are able to track instances in the DB that are created and destroyed in production. However, most people have a data retention policy, of say 30 days or 90 days after which they will want to delete those entries. Not deleting those entries affects DB performance as indices grow very large and data migrations take longer as there is more data to migrate. nova-conductor object backports RPC pinning ensures new services can talk to the older service's method signatures. But many of the parameters are objects that may well be too new for the old service to understand, so you are able to send the object back to the nova-conductor to be downgraded to a version the older service can understand. Process ------- NOTE: This still requires much work before it can become reality. This is more an aspirational plan that helps describe how all the pieces of the jigsaw fit together. This is the planned process for a zero downtime upgrade: #. Prune deleted DB rows, check previous migrations are complete #. Expand DB schema (e.g. add new column) #. Pin RPC versions for all services that are upgraded from this point, using the current version #. Upgrade all nova-conductor nodes (to do object backports) #. Upgrade all other services, except nova-compute and nova-api, using graceful shutdown #. Upgrade nova-compute nodes (this is the bulk of the work). #. Unpin RPC versions #. Add new API nodes, and enable new features, while using a load balancer to "drain" the traffic from old API nodes #. 
Run the new nova-manage command that ensures all DB records are "upgraded" to new data version #. "Contract" DB schema (e.g. drop unused columns) Testing ------- Once we have all the pieces in place, we hope to move the Grenade testing to follow this new pattern. The current tests only cover the existing upgrade process where: * old computes can run with new control plane * but control plane is turned off for DB migrations Unresolved issues ----------------- Ideally you could rollback. We would need to add some kind of object data version pinning, so you can be running all new code to some extent, before there is no path back. Or have some way of reversing the data migration before the final contract. It is unknown how expensive on demand object backports would be. We could instead always send older versions of objects until the RPC pin is removed, but that means we might have new code getting old objects, which is currently not the case. nova-13.1.4/doc/source/test_strategy.rst0000664000567000056710000001000613064447140021404 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============== Test Strategy ============== A key part of the "four opens" is ensuring the OpenStack delivers well-tested and usable software. For more details see: http://docs.openstack.org/project-team-guide/introduction.html#the-four-opens Experience has shown that untested features are frequently broken, in part due to the velocity of upstream changes. As we aim to ensure we keep all features working across upgrades, we must aim to test all features. Reporting Test Coverage ======================= For details on plans to report the current test coverage, please see: :doc:`feature_classification` Running tests and reporting results =================================== Voting in Gerrit ---------------- On every review in gerrit, check tests are run on very patch set, and are able to report a +1 or -1 vote. For more details, please see: http://docs.openstack.org/infra/manual/developers.html#automated-testing Before merging any code, there is an integrate gate test queue, to ensure master is always passing all tests. For more details, please see: http://docs.openstack.org/infra/zuul/gating.html Infra vs Third-Party -------------------- Tests that use fully open source components are generally run by the OpenStack Infra teams. Test setups that use non-open technology must be run outside of that infrastructure, but should still report their results upstream. For more details, please see: http://docs.openstack.org/infra/system-config/third_party.html Ad-hoc testing -------------- It is particularly common for people to run ad-hoc tests on each released milestone, such as RC1, to stop regressions. While these efforts can help stabilize the release, as a community we have a much stronger preference for continuous integration testing. Partly this is because we encourage users to deploy master, and we generally have to assume that any upstream commit may already been deployed in production. 
Types of tests ============== Unit tests ---------- Unit tests help document and enforce the contract for each component. Without good unit test coverage it is hard to continue to quickly evolve the codebase. The correct level of unit test coverage is very subjective, and as such we are not aiming for a particular percentage of coverage, rather we are aiming for good coverage. Generally, every code change should have a related unit test: http://docs.openstack.org/developer/hacking/#creating-unit-tests Integration tests ----------------- Today, our integration tests involve running the Tempest test suite on a variety of Nova deployment scenarios. In addition, we have third parties running the tests on their preferred Nova deployment scenario. Functional tests ---------------- Nova has a set of in-tree functional tests that focus on things that are out of scope for tempest testing and unit testing. Tempest tests run against a full live OpenStack deployment, generally deployed using devstack. At the other extreme, unit tests typically use mock to test a unit of code in isolation. Functional tests don't run an entire stack, they are isolated to nova code, and have no reliance on external services. They do have a WSGI app, nova services and a database, with minimal stubbing of nova internals. Interoperability tests ----------------------- The DefCore committee maintains a list that contains a subset of Tempest tests. These are used to verify if a particular Nova deployment's API responds as expected. For more details, see: https://github.com/openstack/defcore nova-13.1.4/doc/source/threading.rst0000664000567000056710000000473213064447152020464 0ustar jenkinsjenkins00000000000000Threading model =============== All OpenStack services use *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@lockutils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ----------------------------------------- If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) In current code, time.sleep(0)does the same thing as greenthread.sleep(0) if time module is patched through eventlet.monkey_patch(). To be explicit, we recommend contributors use ``greenthread.sleep()`` instead of ``time.sleep()``. MySQL access and eventlet ------------------------- Queries to the MySQL database will block the main thread of a service. This is because OpenStack services use an external C library for accessing the MySQL database. 
Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, the resulting database query blocks the thread. The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/nova/+bug/838581 .. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html nova-13.1.4/doc/source/_ga/0000775000567000056710000000000013064447471016511 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/_ga/layout.html0000664000567000056710000000110713064447140020704 0ustar jenkinsjenkins00000000000000{% extends "!layout.html" %} {% block footer %} {{ super() }} {% endblock %} nova-13.1.4/doc/source/block_device_mapping.rst0000664000567000056710000002451213064447140022636 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Block Device Mapping in Nova ============================ Nova has a concept of block devices that can be exposed to cloud instances. There are several types of block devices an instance can have (we will go into more details about this later in this document), and which ones are available depends on a particular deployment and the usage limitations set for tenants and users. Block device mapping is a way to organize and keep data about all of the block devices an instance has. When we talk about block device mapping, we usually refer to one of two things 1. API/CLI structure and syntax for specifying block devices for an instance boot request 2. The data structure internal to Nova that is used for recording and keeping, which is ultimately persisted in the block_device_mapping table. However, Nova internally has several "slightly" different formats for representing the same data. All of them are documented in the code and or presented by a distinct set of classes, but not knowing that they exist might trip up people reading the code. So in addition to BlockDeviceMapping [1]_ objects that mirror the database schema, we have: 2.1 The API format - this is the set of raw key-value pairs received from the API client, and is almost immediately transformed into the object; however, some validations are done using this format. We will refer to this format as the 'API BDMs' from now on. 2.2 The virt driver format - this is the format defined by the classes in :mod: `nova.virt.block_device`. This format is used and expected by the code in the various virt drivers. These classes, in addition to exposing a different format (mimicking the Python dict interface), also provide a place to bundle some functionality common to certain types of block devices (for example attaching volumes which has to interact with both Cinder and the virt driver code). We will refer to this format as 'Driver BDMs' from now on. 
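To make the API BDM format a little more concrete, the following is a minimal, illustrative sketch of a single entry as it might arrive from an API client in a server boot request. The field names are the ones explained in the rest of this document; the UUID and size values are placeholders, not real data.

.. code-block:: python

    # Illustrative only: one raw key-value entry of the block device mapping
    # list in a boot request, before Nova turns it into a BlockDeviceMapping
    # object.
    api_bdm = {
        'uuid': '<cinder-volume-uuid>',   # the volume/snapshot/image identifier
        'source_type': 'volume',          # image / volume / snapshot / blank
        'destination_type': 'volume',     # local / volume
        'boot_index': 0,                  # 0 = boot device, None = not bootable
        'volume_size': 10,                # size in GB (placeholder)
        'delete_on_termination': False,
        'device_name': None,              # usually best left unset, see below
    }

The Driver BDM classes expose essentially the same information through a dict-like interface, while also bundling behaviour such as the volume attach logic mentioned above.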
Data format and its history ---------------------------- In the early days of Nova, block device mapping general structure closely mirrored that of the EC2 API. During the Havana release of Nova, block device handling code, and in turn the block device mapping structure, had work done on improving the generality and usefulness. These improvements included exposing additional details and features in the API. In order to facilitate this, a new extension was added to the v2 API called `BlockDeviceMappingV2Boot` [2]_, that added an additional `block_device_mapping_v2` field to the instance boot API request. Block device mapping v1 (aka legacy) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This was the original format that supported only cinder volumes (similar to how EC2 block devices support only EBS volumes). Every entry was keyed by device name (we will discuss why this was problematic in its own section later on this page), and would accept only: * UUID of the Cinder volume or snapshot * Type field - used only to distinguish between volumes and Cinder volume snapshots * Optional size field * Optional `delete_on_termination` flag While all of Nova internal code only uses and stores the new data structure, we still need to handle API requests that use the legacy format. This is handled by the Nova API service on every request. As we will see later, since block device mapping information can also be stored in the image metadata in Glance, this is another place where we need to handle the v1 format. The code to handle legacy conversions is part of the :mod: `nova.block_device` module. Intermezzo - problem with device names ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Using device names as the primary per-instance identifier, and exposing them in the API, is problematic for Nova mostly because several hypervisors Nova supports with its drivers can't guarantee that the device names the guest OS assigns are the ones the user requested from Nova. Exposing such a detail in the public API of Nova is obviously not ideal, but it needed to stay for backwards compatibility. It is also required for some (slightly obscure) features around overloading a block device in a Glance image when booting an instance [3]. The plan for fixing this was to allow users to not specify the device name of a block device, and Nova will determine it (with the help of the virt driver), so that it can still be discovered through the API and used when necessary, like for the features mentioned above (and preferably only then). Another use for specifying the device name was to allow the "boot from volume" functionality, by specifying a device name that matches the root device name for the instance (usually `/dev/vda`). Currently (mid Liberty) users are discouraged from specifying device names for all calls requiring or allowing block device mapping, except when trying to override the image block device mapping on instance boot, and it will likely remain like that in the future. Libvirt device driver will outright override any device names passed with it's own values. Block device mapping v2 ^^^^^^^^^^^^^^^^^^^^^^^ New format was introduced in an attempt to solve issues with the original block device mapping format discussed above, and also to allow for more flexibility and addition of features that were not possible with the simple format we had. 
New block device mapping is a list of dictionaries containing the following fields (in addition to the ones that were already there): * source_type - this can have one of the following values: * `image` * `volume` * `snapshot` * `blank` * dest_type - this can have one of the following values: * `local` * `volume` Combination of the above two fields would define what kind of block device the entry is referring to. We currently support the following combinations: * `image` -> `local` - this is only currently reserved for the entry referring to the Glance image that the instance is being booted with (it should also be marked as a boot device). It is also worth noting that an API request that specifies this, also has to provide the same Glance uuid as the `image_ref` parameter to the boot request (this is done for backwards compatibility and may be changed in the future). This functionality might be extended to specify additional Glance images to be attached to an instance after boot (similar to kernel/ramdisk images) but this functionality is not supported by any of the current drivers. * `volume` -> `volume` - this is just a Cinder volume to be attached to the instance. It can be marked as a boot device. * `snapshot` -> `volume` - this works exactly as passing `type=snap` does. It would create a volume from a Cinder volume snapshot and attach that volume to the instance. Can be marked bootable. * `image` -> `volume` - As one would imagine, this would download a Glance image to a cinder volume and attach it to an instance. Can also be marked as bootable. This is really only a shortcut for creating a volume out of an image before booting an instance with the newly created volume. * `blank` -> `volume` - Creates a blank Cinder volume and attaches it. This will also require the volume size to be set. * `blank` -> `local` - Depending on the guest_format field (see below), this will either mean an ephemeral blank disk on hypervisor local storage, or a swap disk (instances can have only one of those). * guest_format - Tells Nova how/if to format the device prior to attaching, should be only used with blank local images. Denotes a swap disk if the value is `swap`. * device_name - See the previous section for a more in depth explanation of this - currently best left empty (not specified that is), unless the user wants to override the existing device specified in the image metadata. In case of Libvirt, even when passed in with the purpose of overriding the existing image metadata, final set of device names for the instance may still get changed by the driver. * disk_bus and device_type - low level details that some hypervisors (currently only libvirt) may support. Some example disk_bus values can be: `ide`, `usb`, `virtio`, `scsi`, while device_type may be `disk`, `cdrom`, `floppy`, `lun`. This is not an exhaustive list as it depends on the virtualization driver, and may change as more support is added. Leaving these empty is the most common thing to do. * boot_index - Defines the order in which a hypervisor will try devices when attempting to boot the guest from storage. Each device which is capable of being used as boot device should be given a unique boot index, starting from 0 in ascending order. Some hypervisors may not support booting from multiple devices, so will only consider the device with boot index of 0. Some hypervisors will support booting from multiple devices, but only if they are of different types - eg a disk and CD-ROM. 
Setting a negative value or None indicates that the device should not be used for booting. The simplest usage is to set it to 0 for the boot device and leave it as None for any other devices. Nova will not allow mixing of two formats in a single request, and will do basic validation to make sure that the requested block device mapping is valid before accepting a boot request. .. [1] In addition to the BlockDeviceMapping Nova object, we also have the BlockDeviceDict class in :mod: `nova.block_device` module. This class handles transforming and validating the API BDM format. .. [2] This work predates API microversions and thus the only way to add it was by means of an API extension. .. [3] This is a feature that the EC2 API offers as well and has been in Nova for a long time, although it has been broken in several releases. More info can be found on `this bug ` nova-13.1.4/doc/source/aggregates.rst0000664000567000056710000001463513064447140020630 0ustar jenkinsjenkins00000000000000.. Copyright 2012 OpenStack Foundation Copyright 2012 Citrix Systems, Inc. Copyright 2012, The Cloudscaling Group, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Host Aggregates =============== Host aggregates can be regarded as a mechanism to further partition an availability zone; while availability zones are visible to users, host aggregates are only visible to administrators. Host aggregates started out as a way to use Xen hypervisor resource pools, but has been generalized to provide a mechanism to allow administrators to assign key-value pairs to groups of machines. Each node can have multiple aggregates, each aggregate can have multiple key-value pairs, and the same key-value pair can be assigned to multiple aggregate. This information can be used in the scheduler to enable advanced scheduling, to set up xen hypervisor resources pools or to define logical groups for migration. Availability Zones (AZs) ------------------------ Availability Zones are the end-user visible logical abstraction for partitioning a cloud without knowing the physical infrastructure. That abstraction doesn't come up in Nova with an actual database model since the availability zone is actually a specific metadata information attached to an aggregate. Adding that specific metadata to an aggregate makes the aggregate visible from an end-user perspective and consequently allows to schedule upon a specific set of hosts (the ones belonging to the aggregate). That said, there are a few rules to know that diverge from an API perspective between aggregates and availability zones: - one host can be in multiple aggregates, but it can only be in one availability zone - by default a host is part of a default availability zone even if it doesn't belong to an aggregate (the configuration option is named ``default_availability_zone``) .. warning:: That last rule can be very error-prone. 
Since the user can see the list of availability zones, they have no way to know whether the default availability zone name (currently *nova*) is provided because an host belongs to an aggregate whose AZ metadata key is set to *nova*, or because there are at least one host belonging to no aggregate. Consequently, it is highly recommended for users to never ever ask for booting an instance by specifying an explicit AZ named *nova* and for operators to never set the AZ metadata for an aggregate to *nova*. That leads to some problems due to the fact that the instance AZ information is explicitly attached to *nova* which could break further move operations when either the host is moved to another aggregate or when the user would like to migrate the instance. Xen Pool Host Aggregates ------------------------ Originally all aggregates were Xen resource pools, now an aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair. You can use aggregates for XenServer resource pools when you have multiple compute nodes installed (only XenServer/XCP via xenapi driver is currently supported), and you want to leverage the capabilities of the underlying hypervisor resource pools. For example, you want to enable VM live migration (i.e. VM migration within the pool) or enable host maintenance with zero-downtime for guest instances. Please, note that VM migration across pools (i.e. storage migration) is not yet supported in XenServer/XCP, but will be added when available. Bear in mind that the two migration techniques are not mutually exclusive and can be used in combination for a higher level of flexibility in your cloud management. Design ------ The OSAPI Admin API is extended to support the following operations: * Aggregates * list aggregates: returns a list of all the host-aggregates (optionally filtered by availability zone) * create aggregate: creates an aggregate, takes a friendly name, etc. returns an id * show aggregate: shows the details of an aggregate (id, name, availability_zone, hosts and metadata) * update aggregate: updates the name and availability zone of an aggregate * set metadata: sets the metadata on an aggregate to the values supplied * delete aggregate: deletes an aggregate, it fails if the aggregate is not empty * add host: adds a host to the aggregate * remove host: removes a host from the aggregate * Hosts * start host maintenance (or evacuate-host): disallow a host to serve API requests and migrate instances to other hosts of the aggregate * stop host maintenance: (or rebalance-host): put the host back into operational mode, migrating instances back onto that host Using the Nova CLI ------------------ Using the nova command you can create, delete and manage aggregates. The following section outlines the list of available commands. Usage ~~~~~ :: * aggregate-list Print a list of all aggregates. * aggregate-create Create a new aggregate with the specified details. * aggregate-delete Delete the aggregate by its id. * aggregate-details Show details of the specified aggregate. * aggregate-add-host Add the host to the specified aggregate. * aggregate-remove-host Remove the specified host from the specified aggregate. * aggregate-set-metadata [ ...] Update the metadata associated with the aggregate. * aggregate-update [] Update the aggregate's name and optionally availability zone. * host-list List all hosts by service * host-update --maintenance [enable | disable] Put/resume host into/from maintenance. 
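For completeness, the same workflow can also be scripted against the compute API using python-novaclient rather than the nova CLI. The following is only a rough sketch: the credentials, endpoint, aggregate name, availability zone, host name and metadata are all placeholder values.

.. code-block:: python

    from novaclient import client

    # Placeholder credentials and endpoint -- substitute values for your cloud.
    nova = client.Client('2', 'admin', 'secret', 'admin',
                         'http://controller:5000/v2.0')

    # Create an aggregate that also defines an availability zone.
    agg = nova.aggregates.create('rack1-aggregate', 'az-rack1')

    # Add a compute host to it and attach scheduler-visible metadata.
    nova.aggregates.add_host(agg, 'compute-01')
    nova.aggregates.set_metadata(agg, {'ssd': 'true'})

The metadata set here is the same key-value information described earlier that the scheduler can use for advanced placement decisions.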
nova-13.1.4/doc/source/stable_api.rst0000664000567000056710000000743013064447152020620 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Intel All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Nova Stable REST API ==================== This document describes both the current state of the Nova REST API -- as of the Kilo release -- and also attempts to describe how the Nova team intends to evolve the REST API's implementation over time and remove some of the cruft that has crept in over the years. Background ---------- Nova currently includes two distinct frameworks for exposing REST API functionality. Older code is called the "V2 API" and exists in the /nova/api/openstack/compute/contrib/ directory. Newer code is called the "v2.1 API" and exists in the /nova/api/openstack/compute/plugins directory. The V2 API is the old Nova REST API. It will be entirely replaced by the V2.1 API, and the V2 API code tree will be removed in the future. The V2.1 API is the new Nova REST API, with a set of improvements that includes microversions and standardized validation of inputs using JSON-Schema. The V2.1 API is also fully backwards compatible with the V2 API (which is why it is called the V2.1 API). Stable API ---------- In the V2 API, there is a concept called 'extension'. An operator can use it to enable/disable parts of the Nova REST API based on requirements. An end user may query the '/extensions' API to discover what *API functionality* is supported by the Nova deployment. Unfortunately, because V2 API extensions could be enabled or disabled from one deployment to another -- as well as custom API extensions added to one deployment and not another -- it was impossible for an end user to know what the OpenStack Compute API actually included. No two OpenStack deployments were consistent, which made cloud interoperability impossible. API extensions, while not (yet) removed from the V2.1 API, are no longer needed to evolve the REST API, and no new API functionality should use the API extension classes to implement new functionality. Instead, new API functionality should use the microversioning decorators to add or change the REST API. The extension concept covers two things in the Nova V2.1 API: * The '/extensions' API In the V2 API the user can query it to determine what APIs are supported by the current Nova deployment. In the V2.1 API, microversions enable us to add new features in backwards-compatible ways, and, where appropriate, to make backwards-incompatible changes as well. The '/extensions' API is frozen in the Nova V2.1 API and will be deprecated in the future. * The plugin framework One of the improvements in the V2.1 API was using stevedore to load Nova REST API extensions instead of the old V2 handcrafted extension loading mechanism. There was an argument that the plugin framework supported extensibility in the Nova API to allow deployers to publish custom API resources.
We will keep the existing plugin mechanisms in place within Nova but only to enable modularity in the codebase, not to allow extending of the Nova REST API. As the extension will be removed from Nove V2.1 REST API. So the concept of core API and extension API is eliminated also. There is no difference between Nova V2.1 REST API, all of them are part of Nova stable REST API. nova-13.1.4/doc/source/testing/0000775000567000056710000000000013064447471017440 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/testing/libvirt-numa.rst0000664000567000056710000005225213064447152022605 0ustar jenkinsjenkins00000000000000 ================================================ Testing NUMA related hardware setup with libvirt ================================================ This page describes how to test the libvirt driver's handling of the NUMA placement, large page allocation and CPU pinning features. It relies on setting up a virtual machine as the test environment and requires support for nested virtualization since plain QEMU is not sufficiently functional. The virtual machine will itself be given NUMA topology, so it can then act as a virtual "host" for testing purposes. ------------------------------------------ Provisioning a virtual machine for testing ------------------------------------------ The entire test process will take place inside a large virtual machine running Fedora 21. The instructions should work for any other Linux distribution which includes libvirt >= 1.2.9 and QEMU >= 2.1.2 The tests will require support for nested KVM, which is not enabled by default on hypervisor hosts. It must be explicitly turned on in the host when loading the kvm-intel/kvm-amd kernel modules. On Intel hosts verify it with .. code-block:: bash # cat /sys/module/kvm_intel/parameters/nested N # rmmod kvm-intel # echo "options kvm-intel nested=y" > /etc/modprobe.d/dist.conf # modprobe kvm-intel # cat /sys/module/kvm_intel/parameters/nested Y While on AMD hosts verify it with .. code-block:: bash # cat /sys/module/kvm_amd/parameters/nested 0 # rmmod kvm-amd # echo "options kvm-amd nested=1" > /etc/modprobe.d/dist.conf # modprobe kvm-amd # cat /sys/module/kvm_amd/parameters/nested 1 The virt-install command below shows how to provision a basic Fedora 21 x86_64 guest with 8 virtual CPUs, 8 GB of RAM and 20 GB of disk space: .. code-block:: bash # cd /var/lib/libvirt/images # wget http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Alpha/Server/x86_64/iso/Fedora-Server-netinst-x86_64-21_Alpha.iso # virt-install \ --name f21x86_64 \ --ram 8000 \ --vcpus 8 \ --file /var/lib/libvirt/images/f21x86_64.img \ --file-size 20 --cdrom /var/lib/libvirt/images/Fedora-Server-netinst-x86_64-21_Alpha.iso \ --os-variant fedora20 When the virt-viewer application displays the installer, follow the defaults for the installation with a couple of exceptions * The automatic disk partition setup can be optionally tweaked to reduce the swap space allocated. No more than 500MB is required, free'ing up an extra 1.5 GB for the root disk. * Select "Minimal install" when asked for the installation type since a desktop environment is not required. * When creating a user account be sure to select the option "Make this user administrator" so it gets 'sudo' rights Once the installation process has completed, the virtual machine will reboot into the final operating system. It is now ready to deploy an OpenStack development environment. 
--------------------------------- Setting up a devstack environment --------------------------------- For later ease of use, copy your SSH public key into the virtual machine .. code-block:: bash # ssh-copy-id Now login to the virtual machine .. code-block:: bash # ssh We'll install devstack under $HOME/src/cloud/. .. code-block:: bash # mkdir -p $HOME/src/cloud # cd $HOME/src/cloud # chmod go+rx $HOME The Fedora minimal install does not contain git and only has the crude & old-fashioned "vi" editor. .. code-block:: bash # sudo yum -y install git emacs At this point a fairly standard devstack setup can be done. The config below is just an example that is convenient to use to place everything in $HOME instead of /opt/stack. Change the IP addresses to something appropriate for your environment of course .. code-block:: bash # git clone git://github.com/openstack-dev/devstack.git # cd devstack # cat >>local.conf < select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", | "nova_object.data": { | "cells": [{ | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [0, 1, 2, 3, 4, 5, 6, 7], | "pinned_cpus": [], | "siblings": [], | "memory": 7793, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 987430, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 0 | }, | }, | ] | }, | } +----------------------------------------------------------------------------+ Meanwhile, the guest instance should not have any NUMA configuration recorded .. code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +---------------+ | numa_topology | +---------------+ | NULL | +---------------+ ----------------------------------------------------- Reconfiguring the test instance to have NUMA topology ----------------------------------------------------- Now that devstack is proved operational, it is time to configure some NUMA topology for the test VM, so that it can be used to verify the OpenStack NUMA support. To do the changes, the VM instance that is running devstack must be shut down. .. code-block:: bash # sudo shutdown -h now And now back on the physical host edit the guest config as root .. code-block:: bash # sudo virsh edit f21x86_64 The first thing is to change the block to do passthrough of the host CPU. In particular this exposes the "SVM" or "VMX" feature bits to the guest so that "Nested KVM" can work. At the same time we want to define the NUMA topology of the guest. To make things interesting we're going to give the guest an asymmetric topology with 4 CPUS and 4 GBs of RAM in the first NUMA node and 2 CPUs and 2 GB of RAM in the second and third NUMA nodes. So modify the guest XML to include the following CPU XML .. code-block:: bash The guest can now be started again, and ssh back into it .. code-block:: bash # virsh start f21x86_64 ...wait for it to finish booting # ssh Before starting OpenStack services again, it is necessary to reconfigure Nova to enable the NUMA scheduler filter. The libvirt virtualization type must also be explicitly set to KVM, so that guests can take advantage of nested KVM. .. 
code-block:: bash # sudo emacs /etc/nova/nova.conf Set the following parameters: .. code-block:: bash [DEFAULT] scheduler_default_filters=RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter, NUMATopologyFilter [libvirt] virt_type = kvm With that done, OpenStack can be started again .. code-block:: bash # cd $HOME/src/cloud/devstack # ./rejoin-stack.sh The first thing is to check that the compute node picked up the new NUMA topology setup for the guest .. code-block:: bash # mysql -u root -p nova MariaDB [nova]> select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", | "nova_object.data": { | "cells": [{ | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [0, 1, 2, 3], | "pinned_cpus": [], | "siblings": [], | "memory": 3857, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 987430, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 0 | }, | }, | { | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [4, 5], | "pinned_cpus": [], | "siblings": [], | "memory": 1969, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 504216, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 1 | }, | }, | { | "nova_object.name": "NUMACell", | "nova_object.data": { | "cpu_usage": 0, | "memory_usage": 0, | "cpuset": [6, 7], | "pinned_cpus": [], | "siblings": [], | "memory": 1967, | "mempages": [ | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 503575, | "used": 0, | "size_kb": 4 | }, | }, | { | "nova_object.name": "NUMAPagesTopology", | "nova_object.data": { | "total": 0, | "used": 0, | "size_kb": 2048 | }, | } | ], | "id": 2 | }, | } | ] | }, | } +----------------------------------------------------------------------------+ This indeed shows that there are now 3 NUMA nodes for the "host" machine, the first with 4 GB of RAM and 4 CPUs, and others with 2 GB of RAM and 2 CPUs each. ----------------------------------------------------- Testing instance boot with no NUMA topology requested ----------------------------------------------------- For the sake of backwards compatibility, if the NUMA filter is enabled, but the flavor/image does not have any NUMA settings requested, it should be assumed that the guest will have a single NUMA node. The guest should be locked to a single host NUMA node too. Boot a guest with the m1.tiny flavor to test this condition .. code-block:: bash # . openrc admin admin # nova boot --image cirros-0.3.2-x86_64-uec --flavor m1.tiny cirros1 Now look at the libvirt guest XML. It should show that the vCPUs are locked to pCPUs within a particular node. .. code-block:: bash # virsh -c qemu:///system list .... # virsh -c qemu:///system dumpxml instanceXXXXXX ... 1 ... This example shows that the guest has been locked to the 3rd NUMA node (which contains pCPUs 6 and 7). 
Note that there is no explicit NUMA topology listed in the guest XML. ------------------------------------------------ Testing instance boot with 1 NUMA cell requested ------------------------------------------------ Moving forward a little, explicitly tell Nova that the NUMA topology for the guest should have a single NUMA node. This should operate in an identical manner to the default behavior where no NUMA policy is set. To define the topology we will create a new flavor .. code-block:: bash # nova flavor-create m1.numa 999 1024 1 4 # nova flavor-key m1.numa set hw:numa_nodes=1 # nova flavor-show m1.numa Now boot the guest using this new flavor .. code-block:: bash # nova boot --image cirros-0.3.2-x86_64-uec --flavor m1.numa cirros2 Looking at the resulting guest XML from libvirt .. code-block:: bash # virsh -c qemu:///system dumpxml instanceXXXXXX ... 4 ... ... The XML shows: * Each guest CPU has been pinned to the physical CPUs associated with a particular NUMA node * The emulator threads have been pinned to the union of all physical CPUs in the host NUMA node that the guest is placed on * The guest has been given a virtual NUMA topology with a single node holding all RAM and CPUs * The guest NUMA node has been strictly pinned to a host NUMA node. As a further sanity test, check what Nova recorded for the instance in the database. This should match the information .. code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", | "nova_object.data": { | "instance_uuid": "4c2302fe-3f0f-46f1-9f3e-244011f6e03a", | "cells": [ | { | "nova_object.name": "InstanceNUMACell", | "nova_object.data": { | "cpu_topology": null, | "pagesize": null, | "cpuset": [ | 0, | 1, | 2, | 3 | ], | "memory": 1024, | "cpu_pinning_raw": null, | "id": 0 | }, | } | ] | }, | } +----------------------------------------------------------------------------+ ------------------------------------------------- Testing instance boot with 2 NUMA cells requested ------------------------------------------------- Now getting more advanced we tell Nova that the guest will have two NUMA nodes. To define the topology we will change the previously defined flavor .. code-block:: bash # nova flavor-key m1.numa set hw:numa_nodes=2 # nova flavor-show m1.numa Now boot the guest using this changed flavor .. code-block:: bash # nova boot --image cirros-0.3.2-x86_64-uec --flavor m1.numa cirros2 Looking at the resulting guest XML from libvirt .. code-block:: bash # virsh -c qemu:///system dumpxml instanceXXXXXX ... 4 ... ... The XML shows: * Each guest CPU has been pinned to the physical CPUs associated with particular NUMA nodes * The emulator threads have been pinned to the union of all physical CPUs in the host NUMA nodes that the guest is placed on * The guest has been given a virtual NUMA topology with two nodes, each holding half the RAM and CPUs * The guest NUMA nodes have been strictly pinned to different host NUMA node. As a further sanity test, check what Nova recorded for the instance in the database. This should match the information .. 
code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", | "nova_object.data": { | "instance_uuid": "a14fcd68-567e-4d71-aaa4-a12f23f16d14", | "cells": [ | { | "nova_object.name": "InstanceNUMACell", | "nova_object.data": { | "cpu_topology": null, | "pagesize": null, | "cpuset": [ | 0, | 1 | ], | "memory": 512, | "cpu_pinning_raw": null, | "id": 0 | }, | }, | { | "nova_object.name": "InstanceNUMACell", | "nova_object.data": { | "cpu_topology": null, | "pagesize": null, | "cpuset": [ | 2, | 3 | ], | "memory": 512, | "cpu_pinning_raw": null, | "id": 1 | }, | } | ] | }, | } | +----------------------------------------------------------------------------+ nova-13.1.4/doc/source/testing/serial-console.rst0000664000567000056710000000527013064447140023106 0ustar jenkinsjenkins00000000000000 ====================== Testing Serial Console ====================== The main aim of this feature is exposing an interactive web-based serial consoles through a web-socket proxy. This page describes how to test it from a devstack environment. --------------------------------- Setting up a devstack environment --------------------------------- For instructions on how to setup devstack with serial console support enabled see `this guide `_. --------------- Testing the API --------------- Starting a new instance. .. code-block:: bash # cd devstack && . openrc # nova boot --flavor 1 --image cirros-0.3.2-x86_64-uec cirros1 Nova provides a command `nova get-serial-console` which will returns a URL with a valid token to connect to the serial console of VMs. .. code-block:: bash # nova get-serial-console cirros1 +--------+-----------------------------------------------------------------+ | Type | Url | +--------+-----------------------------------------------------------------+ | serial | ws://127.0.0.1:6083/?token=5f7854b7-bf3a-41eb-857a-43fc33f0b1ec | +--------+-----------------------------------------------------------------+ Currently nova does not provide any client able to connect from an interactive console through a web-socket. A simple client for *test purpose* can be written with few lines of Python. .. code-block:: python # sudo easy_install ws4py || sudo pip install ws4py # cat >> client.py < cirros1 login nova-13.1.4/doc/source/development.environment.rst0000664000567000056710000001510713064447152023402 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Development Quickstart ======================= This page describes how to setup and use a working Python development environment that can be used in developing nova on Ubuntu, Fedora or Mac OS X. 
These instructions assume you're already familiar with git. Following these instructions will allow you to build the documentation and run the nova unit tests. If you want to be able to run nova (i.e., launch VM instances), you will also need to --- either manually or by letting DevStack do it for you --- install libvirt and at least one of the `supported hypervisors`_. Running nova is currently only supported on Linux, although you can run the unit tests on Mac OS X. .. _supported hypervisors: http://wiki.openstack.org/HypervisorSupportMatrix .. note:: For how to contribute to Nova, see HowToContribute_. Nova uses the Gerrit code review system, GerritWorkflow_. .. _GerritWorkflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow .. _HowToContribute: http://docs.openstack.org/infra/manual/developers.html .. _`docs.openstack.org`: http://docs.openstack.org Setup ===== There are two ways to create a development environment: using DevStack, or explicitly installing and cloning just what you need. Using DevStack -------------- See `Devstack`_ Documentation. If you would like to use Vagrant, there is a `Vagrant`_ for DevStack. .. _`Devstack`: http://docs.openstack.org/developer/devstack/ .. _`Vagrant`: https://github.com/openstack-dev/devstack-vagrant/blob/master/README.md .. Until the vagrant markdown documents are rendered somewhere on .openstack.org, linking to github Explicit Install/Clone ---------------------- DevStack installs a complete OpenStack environment. Alternatively, you can explicitly install and clone just what you need for Nova development. The first step of this process is to install the system (not Python) packages that are required. Following are instructions on how to do this on Linux and on the Mac. Linux Systems ````````````` .. note:: This section is tested for Nova on Ubuntu (14.04-64) and Fedora-based (RHEL 6.1) distributions. Feel free to add notes and change according to your experiences or operating system. Install the prerequisite packages. On Ubuntu:: sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev graphviz libsqlite3-dev python-tox python3-dev python3 gettext On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel graphviz sqlite-devel python3-devel python3 gettext sudo pip-python install tox On openSUSE-based distributions (SLES 12, openSUSE 13.1, Factory or Tumbleweed):: sudo zypper in gcc git libffi-devel libmysqlclient-devel libvirt-devel libxslt-devel postgresql-devel python-devel python-pip python-tox python-virtualenv python3-devel python3 gettext-runtime Mac OS X Systems ```````````````` Install virtualenv:: sudo easy_install virtualenv Check the version of OpenSSL you have installed:: openssl version The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or Mac OS X 10.7 (OpenSSL 0.9.8r) or Mac OS X 10.10.3 (OpenSSL 0.9.8zc) works fine with nova. OpenSSL versions from brew like OpenSSL 1.0.1k work fine as well. Getting the code ```````````````` Once you have the prerequisite system packages installed, the next step is to clone the code. 
Grab the code from git:: git clone https://git.openstack.org/openstack/nova cd nova Building the Documentation ========================== Install the prerequisite packages: graphviz To do a full documentation build, issue the following command while the nova directory is current. .. code-block:: bash tox -edocs That will create a Python virtual environment, install the needed Python prerequisites in that environment, and build all the documentation in that environment. Running unit tests ================== See `Running Python Unit Tests`_. .. _`Running Python Unit Tests`: http://docs.openstack.org/infra/manual/python.html#running-python-unit-tests Note that some unit and functional tests use a database. See the file ``tools/test-setup.sh`` on how the databases are set up in the OpenStack CI environment and replicate it in your test environment. Using a remote debugger ======================= Some modern IDE such as pycharm (commercial) or Eclipse (open source) support remote debugging. In order to run nova with remote debugging, start the nova process with the following parameters --remote_debug-host --remote_debug-port Before you start your nova process, start the remote debugger using the instructions for that debugger. For pycharm - http://blog.jetbrains.com/pycharm/2010/12/python-remote-debug-with-pycharm/ For Eclipse - http://pydev.org/manual_adv_remote_debugger.html More detailed instructions are located here - http://novaremotedebug.blogspot.com Using fake computes for tests ============================= The number of instances supported by fake computes is not limited by physical constraints. It allows you to perform stress tests on a deployment with few resources (typically a laptop). But you must avoid using scheduler filters limiting the number of instances per compute (like RamFilter, DiskFilter, AggregateCoreFilter), otherwise they will limit the number of instances per compute. Fake computes can also be used in multi hypervisor-type deployments in order to take advantage of fake and "real" computes during tests: * create many fake instances for stress tests * create some "real" instances for functional tests Fake computes can be used for testing Nova itself but also applications on top of it. nova-13.1.4/doc/source/how_to_get_involved.rst0000664000567000056710000003443613064447152022567 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _getting_involved: ===================================== How to get (more) involved with Nova ===================================== So you want to get more involved with Nova? Or you are new to Nova and wondering where to start? We are working on building easy ways for you to get help and ideas on how to learn more about Nova and how the Nova community works. Any questions, please ask! If you are unsure who to ask, then please contact the `Mentoring Czar`__. __ `Nova People`_ How do I get started? 
===================== There are quite a few global docs on this: - http://www.openstack.org/assets/welcome-guide/OpenStackWelcomeGuide.pdf - https://wiki.openstack.org/wiki/How_To_Contribute - http://www.openstack.org/community/ There is more general info, non Nova specific info here: - https://wiki.openstack.org/wiki/Mentors - https://wiki.openstack.org/wiki/OpenStack_Upstream_Training What should I work on? ~~~~~~~~~~~~~~~~~~~~~~ So you are starting out your Nova journey, where is a good place to start? If you'd like to learn how Nova works before changing anything (good idea!), we recommend looking for reviews with -1s and -2s and seeing why they got downvoted. There is also the :ref:`code-review`. Once you have some understanding, start reviewing patches. It's OK to ask people to explain things you don't understand. It's also OK to see some potential problems but put a +0. Another way is to look for a subteam you'd like to get involved with and review their patches. See: https://etherpad.openstack.org/p/mitaka-nova-priorities-tracking Once you're ready to write code, take a look at some of the work already marked as low-hanging fruit: * https://bugs.launchpad.net/nova/+bugs?field.tag=low-hanging-fruit * https://etherpad.openstack.org/p/nova-low-hanging-fruit How do I get my feature in? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The best way of getting your feature in is... well it depends. First concentrate on solving your problem and/or use case, don't fixate on getting the code you have working merged. Its likely things will need significant re-work after you discuss how your needs match up with all the existing ways Nova is currently being used. The good news, is this process should leave you with a feature thats more flexible and doesn't lock you into your current way of thinking. A key part of getting code merged, is helping with reviewing other people's code. Great reviews of others code will help free up more core reviewer time to look at your own patches. In addition, you will understand how the review is thinking when they review your code. Also, work out if any on going efforts are blocking your feature and helping out speeding those up. The spec review process should help with this effort. For more details on our process, please see: :ref:`process`. What is expected of a good contributor? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TODO - need more info on this Top Tips for working with the Nova community ============================================ Here are some top tips around engaging with the Nova community: - IRC - we talk a lot in #openstack-nova - do ask us questions in there, and we will try to help you - not sure about asking questions? feel free to listen in around other people's questions - we recommend you setup an IRC bouncer: https://wiki.openstack.org/wiki/IRC - Email - Use the [nova] tag in the mailing lists - Filtering on [nova] and [all] can help tame the list - Be Open - i.e. don't review your teams code in private, do it publicly in gerrit - i.e. be ready to talk about openly about problems you are having, not "theoretical" issues - that way you can start to gain the trust of the wider community - Got a problem? Please ask! - Please raise any problems and ask questions early - we want to help you before you are frustrated or annoyed - unsure who to ask? Just ask in IRC, or check out the list of `Nova people`_. - Talk about problems first, then solutions - Nova is a big project. 
At first, it can be hard to see the big picture - Don't think about "merging your patch", instead think about "solving your problem" - conversations are more productive that way - Its not the decision thats important, it's the reason behind it thats important - Don't like the way the community is going? - Please ask why we ware going that way, and please engage with the debate - If you don't, we are unable to learn from what you have to offer - No one will decide, this is stuck, who can help me? - it's rare, but it happens - it's the `Nova PTL`__'s job to help you - ...but if you don't ask, it's hard for them to help you __ `Nova People`_ Process ======= It can feel like you are faced with a wall of process. We are a big community, to make sure the right communication happens, we do use a minimal amount of process. If you find something that doesn't make sense, please: - ask questions to find out \*why\* it happens - if you know of a better way to do it, please speak up - one "better way" might be to remove the process if it no longer helps To learn more about Nova's process, please read :ref:`process`. Why bother with any process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why is it worth creating a bug or blueprint to track your code review? This may seem like silly process, but there is usually a good reason behind it. We have lots of code to review, and we have tools to try and get to really important code reviews first. If yours is really important, but not picked up by our tools, it's possible you just get lost in the bottom of a big queue. If you have a bug fix, you have done loads of work to identify the issue, and test out your fix, and submit it. By adding a bug report, you are making it easier for other folks who hit the same problem to find your work, possibly saving them the hours of pain you went through. With any luck that gives all those people the time to fix different bugs, all that might have affected you, if you had not given them the time go fix it. Its similar with blueprints. You have worked out how to scratch your itch, lets tell others about that great new feature you have added, so they can use that. Also, it stops someone with a similar idea going through all the pain of creating a feature only to find you already have that feature ready and up for review, or merged into the latest release. Hopefully this gives you an idea why we have applied a small layer of process to what we are doing. Having said all this, we need to unlearn old habits to move forward, there may be better ways to do things, and we are open to trying them. Please help be part of the solution. .. _why_plus1: Why do code reviews if I am not in nova-core? ============================================= Code reviews are the life blood of the Nova developer community. There is a good discussion on how you do good reviews, and how anyone can be a reviewer: http://docs.openstack.org/infra/manual/developers.html#peer-review In the draft process guide, I discuss how doing reviews can help get your code merged faster: :ref:`process`. Lets look at some of the top reasons why participating with code reviews really helps you: - Doing more reviews, and seeing what other reviewers notice, will help you better understand what is expected of code that gets merged into master - Having more non-core people do great reviews, leaves less review work for the core reviewers to do, so we are able get more code merged - Empathy is one of the keys to a happy community. 
If you are used to doing code reviews, you will better understand the comments you get when people review your code. As you do more code reviews, and see what others notice, you will get a better idea of what people are looking for when then apply a +2 to your code. - TODO - needs more detail What are the most useful types of code review comments? Well here are a few to the top ones: - Fundamental flaws are the biggest thing to spot. Does the patch break a whole set of existing users, or an existing feature? - Consistency of behaviour is really important. Does this bit of code do things differently to where similar things happen else where in Nova? - Is the code easy to maintain, well tested and easy to read? Code is read order of magnitude times more than it is written, so optimise for the reader of the code, not the writer. - TODO - what others should go here? Let's look at some problems people hit when starting out doing code reviews: - My +1 doesn't mean anything, why should I bother? - So your +1 really does help. Some really useful -1 votes that lead to a +1 vote helps get code into a position - When to use -1 vs 0 vs +1 - Please see the guidelines here: http://docs.openstack.org/infra/manual/developers.html#peer-review - I have already reviewed this code internally, no point in adding a +1 externally? - Please talk to your company about doing all code reviews in the public, that is a much better way to get involved. showing how the code has evolved upstream, is much better than trying to 'perfect' code internally, before uploading for public review. You can use Draft mode, and mark things as WIP if you prefer, but please do the reviews upstream. - Where do I start? What should I review? - There are various tools, but a good place to start is: https://etherpad.openstack.org/p/ -nova-priorities-tracking - Depending on the time in the cycle, it's worth looking at NeedsCodeReview blueprints: https://blueprints.launchpad.net/nova/ - Maybe take a look at things you want to see merged, bug fixes and features, or little code fixes - Look for things that have been waiting a long time for a review: http://5885fef486164bb8596d-41634d3e64ee11f37e8658ed1b4d12ec.r44.cf3.rackcdn.com/nova-openreviews.html - If you get through the above lists, try other tools, such as: http://status.openstack.org/reviews - TODO - I think there is more to add here How to do great code reviews? ============================= http://docs.openstack.org/infra/manual/developers.html#peer-review For more tips, please see: `Why do code reviews if I am not in nova-core?`_ How do I become nova-core? ========================== You don't have to be nova-core to be a valued member of the Nova community. There are many, many ways you can help. Every quality review that helps someone get their patch closer to being ready to merge helps everyone get their code merged faster. The first step to becoming nova-core is learning how to be an active member of the Nova community, including learning how to do great code reviews. For more details see: https://wiki.openstack.org/wiki/Nova/CoreTeam#Membership_Expectations If you feel like you have the time to commit to all the nova-core membership expectations, reach out to the Nova PTL who will be able to find you an existing member of nova-core to help mentor you. If all goes well, and you seem like a good candidate, your mentor will contact the rest of the nova-core team to ask them to start looking at your reviews, so they are able to vote for you, if you get nominated for join nova-core. 
We encourage all mentoring, where possible, to occur on #openstack-nova so everyone can learn and benefit from your discussions. The above mentoring is available to everyone who wants to learn how to do better code reviews, even if you don't ever want to commit to becoming nova-core. If you already have a mentor, that's great; the process is only there for folks who are still trying to find one. Being admitted to the mentoring program in no way guarantees you will eventually become a member of nova-core; it's here to help you improve, and to help you have the sort of involvement and conversations that can lead to becoming a member of nova-core. How to do great nova-spec reviews? ================================== http://specs.openstack.org/openstack/nova-specs/specs/mitaka/template.html http://docs.openstack.org/developer/nova/devref/kilo.blueprints.html#when-is-a-blueprint-needed Spec reviews are always a step ahead of the normal code reviews. Follow the above links for some great information on specs/reviews. The following are some important tips: 1. The specs are published as html documents. Ensure that the .rst file provided by the author renders properly. 2. More often than not, it's important to check that there are no overlaps across multiple specs. 3. Ensure that a proper dependency of the spec is identified. For example, a user-desired feature that requires a proper base enablement should be a dependent spec. 4. Ask for clarity on changes that appear ambiguous to you. 5. Every release nova gets a huge set of spec proposals, and reviewing them all is a huge task for the limited set of nova cores. Helping the cores with additional reviews is always a great thing. How to do great bug triage? =========================== https://wiki.openstack.org/wiki/Nova/BugTriage More details coming soon... How to step up into a project leadership role? ============================================== There are many ways to help lead the Nova project: * Mentoring efforts, and getting started tips: https://wiki.openstack.org/wiki/Nova/Mentoring * Info on process, with a focus on how you can go from an idea to getting code merged in Nova: https://wiki.openstack.org/wiki/Nova/Mitaka_Release_Schedule * Consider leading an existing `Nova subteam`_ or forming a new one. * Consider becoming a `Bug tag owner`_. * Contact the PTL about becoming a Czar; see `Nova People`_. .. _`Nova people`: https://wiki.openstack.org/wiki/Nova#People .. _`Nova subteam`: https://wiki.openstack.org/wiki/Nova#Nova_subteams .. _`Bug tag owner`: https://wiki.openstack.org/wiki/Nova/BugTriage#Step_2:_Triage_Tagged_Bugs nova-13.1.4/doc/source/api_microversion_dev.rst0000664000567000056710000003277013064447152022730 0ustar jenkinsjenkins00000000000000API Microversions ================= Background ---------- Nova uses a framework we call 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API, so breaking changes can be added to the API without breaking users who don't specifically ask for them. This is done with an HTTP header ``X-OpenStack-Nova-API-Version`` which is a monotonically increasing semantic version number starting from ``2.1``. If a user makes a request without specifying a version, they will get the ``DEFAULT_API_VERSION`` as defined in ``nova/api/openstack/wsgi.py``. This value is currently ``2.1`` and is expected to remain so for quite a long time.
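For illustration only, here is a minimal sketch of a client opting in to a specific microversion by setting this header on a plain HTTP request. It uses the ``requests`` library, and the endpoint URL and token are placeholder values, not something defined by Nova (in practice a client such as python-novaclient handles this for you)::

    import requests

    # Placeholder values - substitute a real compute endpoint and Keystone token.
    compute_endpoint = "http://compute.example.com/v2.1/servers"
    token = "<keystone-token>"

    response = requests.get(
        compute_endpoint,
        headers={
            "X-Auth-Token": token,
            # Ask for microversion 2.3; omit this header to get the 2.1 default.
            "X-OpenStack-Nova-API-Version": "2.3",
        },
    )
    print(response.status_code)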
There is a special value ``latest`` which can be specified, which will allow a client to always receive the most recent version of API responses from the server. .. warning:: The ``latest`` value is mostly meant for integration testing and would be dangerous to rely on in client code since Nova microversions are not following semver and therefore backward compatibility is not guaranteed. Clients, like python-novaclient, should always require a specific microversion but limit what is acceptable to the version range that it understands at the time. For full details please read the Kilo spec for microversions in the nova-specs repository. When do I need a new Microversion? ---------------------------------- A microversion is needed when the contract to the user is changed. The user contract covers many kinds of information such as: - the Request - the list of resource urls which exist on the server Example: adding a new servers/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on urls Example: adding a new parameter ``is_yellow`` to servers/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the list of attributes and data structures accepted. Example: adding a new attribute 'locked': True/False to the request body However, the attribute ``os:scheduler_hints`` of the "create a server" API is an exception to this. A new scheduler which adds a new attribute to ``os:scheduler_hints`` doesn't require a new microversion, because available schedulers depend on cloud environments, and we accept customized schedulers as a rule. - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of servers/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to servers/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. See [#f2]_ for the 400, 403 and 404 cases. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. .. note:: Fixing a bug so that a 400+ code is returned rather than a 500 or 503 does not require a microversion change. It's assumed that clients are not expected to handle a 500 or 503 response and therefore should not need to opt-in to microversion changes that fix a 500 or 503 response from happening. According to the OpenStack API Working Group, a **500 Internal Server Error** should **not** be returned to the user for failures due to user error that can be fixed by changing the request on the client side. See [#f1]_. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?"
silent_fail[shape="diamond", style="", group=g1, label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", group=g1, label="Did we return a 500 before?"]; new_error[shape="diamond", style="", group=g1, label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", group=g1, label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", group=g1, label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", group=g1, label="Did we add or remove a resource url?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label=" no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label=" no"]; new_error -> new_attr[label=" no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label=" no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label=" no"]; new_param -> yes[label="yes"]; new_resource -> no[label=" no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } **Footnotes** .. [#f1] When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion (except in [#f2]_). The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in Nova. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. If it would work with no client side changes on both Nova versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed. .. [#f2] The exception to not needing a microversion when returning a previously unspecified error code is the 400, 403 and 404 cases. This is considered OK to return even if previously unspecified in the code since it's implied given keystone authentication can fail with a 403 and API validation can fail with a 400 for invalid json request body. Request to url/resource that does not exist always fails with 404. When a microversion is not needed --------------------------------- A microversion is not needed in the following situation: - the response - Changing the error message without changing the response code does not require a new microversion. - Removing an inapplicable HTTP header, for example, suppose the Retry-After HTTP header is being returned with a 4xx code. This header should only be returned with a 503 or 3xx response, so it may be removed without bumping the microversion. In Code ------- In ``nova/api/openstack/wsgi.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples: Adding a new API method ~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Nova-API-Version`` of >= ``2.4``. 
If they had specified a lower version (or not specified it and received the default of ``2.1``) the server would respond with ``HTTP/404``. Removing an API method ~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Nova-API-Version`` of <= ``2.4``. If ``2.5`` or later is specified the server will respond with ``HTTP/404``. Changing a method's behavior ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.3") def my_api_method(self, req, id): .... method_1 ... @wsgi.Controller.api_version("2.4") # noqa def my_api_method(self, req, id): .... method_2 ... If a caller specified ``2.1``, ``2.2`` or ``2.3`` (or received the default of ``2.1``) they would see the result from ``method_1``; with ``2.4`` or later they would see the result from ``method_2``. It is vital that the two methods have the same name, so the second of them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may differ in any kind of semantics (schema validation, return values, response codes, etc). A change in schema only ~~~~~~~~~~~~~~~~~~~~~~~ If there is no change to the method, only to the schema that is used for validation, you can add a version range to the ``validation.schema`` decorator:: @wsgi.Controller.api_version("2.1") @validation.schema(dummy_schema.dummy, "2.3", "2.8") @validation.schema(dummy_schema.dummy2, "2.9") def update(self, req, id, body): .... This method will be available from version ``2.1``, validated according to ``dummy_schema.dummy`` from ``2.3`` to ``2.8``, and validated according to ``dummy_schema.dummy2`` from ``2.9`` onward. When not using decorators ~~~~~~~~~~~~~~~~~~~~~~~~~ When you don't want to use the ``@api_version`` decorator on a method or you want to change behavior within a method (say it leads to simpler or simply a lot less code) you can directly test for the requested version with a method as long as you have access to the api request object (commonly called ``req``). Every API method has an api_version_request object attached to the req object and that can be used to modify behavior based on its value:: def index(self, req): req_version = req.api_version_request req1_min = api_version_request.APIVersionRequest("2.1") req1_max = api_version_request.APIVersionRequest("2.5") req2_min = api_version_request.APIVersionRequest("2.6") req2_max = api_version_request.APIVersionRequest("2.10") if req_version.matches(req1_min, req1_max): ....stuff.... elif req_version.matches(req2_min, req2_max): ....other stuff.... elif req_version > api_version_request.APIVersionRequest("2.10"): ....more stuff..... The first argument to the matches method is the minimum acceptable version and the second is the maximum acceptable version. A specified version can be null:: null_version = APIVersionRequest() If the minimum version specified is null then there is no restriction on the minimum version, and likewise if the maximum version is null there is no restriction on the maximum version. Alternatively a one sided comparison can be used as in the example above.
Other necessary changes ----------------------- If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change: * Update ``REST_API_VERSION_HISTORY`` in ``nova/api/openstack/api_version_request.py`` * Update ``_MAX_API_VERSION`` in ``nova/api/openstack/api_version_request.py`` * Add a verbose description to ``nova/api/openstack/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes. * Update the expected versions in affected tests, for example in ``nova/tests/unit/api/openstack/compute/test_versions.py``. * Update the get versions api sample files: ``doc/api_samples/versions/versions-get-resp.json`` and ``nova/tests/functional/api_samples/versions/versions-get-resp.json.tpl``. * Make a new commit to python-novaclient and update corresponding files to enable the newly added microversion API. Allocating a microversion ------------------------- If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances (which would have been mentioned in the nova spec for the change), the minor number of ``_MAX_API_VERSION`` will be incremented. This will also be the new microversion number for the API change. It is possible that multiple microversion patches will be proposed in parallel and the microversions will conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need to rebase their patch over time, recalculating the new version number as above based on the updated value of ``_MAX_API_VERSION``. Testing Microversioned API Methods ---------------------------------- Testing a microversioned API method is very similar to a normal controller method test; you just need to add the ``X-OpenStack-Nova-API-Version`` header, for example:: req = fakes.HTTPRequest.blank('/testable/url/endpoint') req.headers = {'X-OpenStack-Nova-API-Version': '2.2'} req.api_version_request = api_version.APIVersionRequest('2.6') controller = controller.TestableController() res = controller.index(req) ... assertions about the response ... For many examples of testing, the canonical examples are in ``nova/tests/unit/api/openstack/compute/test_microversions.py``.
nova-13.1.4/doc/source/image_src/0000775000567000056710000000000013064447471017714 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/image_src/architecture.dia0000664000567000056710000001527213064447140023055 0ustar jenkinsjenkins00000000000000‹í]ms7’þž_¡òVí'F7 `¹ÉV^î’­ÝÚKmrŸ]´Äȼ¥HI9ñ}¸ß~hŒ[ÒP"‡ŠãkV%eÒãiØýàéF¿üù/¿]ÍÏÞOWëÙrñå 0öÅÙtq¾¼˜-.¿|ñŸ?ÿûËøâ/_}ñç‹ÙäOù¿ËÕäê,ÿ‹ÅZÞ}ùâífóîO¯^ýúë¯fþa=Ù,Wf>»6ëé«ÿÌç“Wù¢W/¾úâììÓ\L6ùìæÓÉf³š½¹ÞLÏ“«é—/ÞLÎÿu¹Z^/.^4WÝ\w¾œ/Wgï'ó/_üá—òzñêæ6¯îÜç‘{¿›\N߬¦“m¿µÍ¯”ºÜúÝtuÿ¶Wï–ëY¾dóá݃K¶ÜGþÿÉ57W­óE‹Ë¯þð÷éf3]ý¡ùZ7~¼_Û—Ý*hs5Y]Îeåõ™7‹Æ»$/†èÅàn×eqoŽ+n~\q«ãŠ›­_¿[®6«ÉlóPä›år>,©›Õõ´»œõùdžUì±Ç‚îwÿe¶Ù,Ÿøþ¿Læë] ùøw‹Û×z/W³‹Ç÷Î[îòëìbóöõo•–«¹û‡Jw?[ÏÞ̧mß~¶ØôvûýÜþþ¯Süã??Pš»mÝ".âÔOýþ[Äåõìbº~BÍî^³åNoo.{õԪ߿n×…i>º·aóɇéêæöß|ܪÏn~àÄ9›œofïoß}²&Ë7ÿ5=ßÜ<ì÷ÓåÕ4K=?{yöãtõ‹üÍOÿ}=Yå|ä$/Îf_¾ø{wáî?j¾qÆÄ+÷nù»zYc bJ#ÑòýUyˆ)Ûä¼ys_Ì*õÉâr>mD½Ì²l " ‚å±7li„h3u“œ—jòâ ;¸˜oºÝ}:Ÿ^½>_®)JïkXd8{¸Ž·Pæ´‰C HÎSÀd½Î.ßnÆÉG÷û  ›°¼YNŸz0kÀ6/ dða‚K׫‡jq—ÐZÛMÄ/³ù|wéø~"Öo—¿¾náüO“™½×j½ù0ŸÞ0]\_Ýü0à´ucÝkýæ³w¯ß.W³ÿY.6“ù£wéì-&æfv^OÈúúÐÁé‹Î‹Ô|ØìÛ¶„ogëóeÞþ9O6ÿó½¿ËÛϛɺmO€C÷´&² #éâ–ÐHÂFRHct&€ó#p"9 wKèe wÙÐDJè ’ODÎUÜÀ¤»¯¨;‚îº#Gø)£èÅdu‘7…Ÿ§¿mZv8}»\\\Ÿo–+…¨êµE'?„‚Ï ¡ö:œæƒÁ Œ‰GÉ@t©*Z$"I.KË µ‡0ʪ™°«äS8ˆèe õhúÎ ô B"ô bGÊz ¬Ž|hüZH5w[I`ÈqÒê|D‘ ‘H=j¥«é£zÔGå«¿/Ôb‘Í8cÎú•ÝþÍÙÛŒR7*ºYjzöñoóOʳAïݵwvÆÅ¬–0S;ö.úºì8ùF’Í’„{‡-ãcJaÀì¸%Tv¬i:ÊŽ•wbÇ©v2* ñb]vŒž@Øq²…#3‹döä”+;~L5š;vŸŸƒí!x¬E)ʱܕíJÙßH¡ÇÈ褰)ZNƒ¾„J•+=Vz܉ôÀÉ3Êár²©nÂCH1ˆ 46.ŒÐ vjƒrä§uR)ò`(ræ§ÏÏ‘±2{áwÞv­Ü-Aä†Ì`„Î %vùÐT‚¬Y ²änÙõ@#Qu¾«õíZ¯ì¸ägú#™Bd‘œÑ3(EVŠü´V*GGÆ*yP<¸ˆ Ñ8ëgž°r ” !Öh’(Ù%$L‰PÄõÓùÛéÅõ|ª˜X·*¥bâ° %À÷вÉŠ£L©Se¼²x# (Vö'9ì `qÐ-›_BõåÕ—W_^}ùn´•{ ­ÑÙ$ !Ö è"”n–„ÜÉ£âÛgôŒÊ[•·>­•Ê[‡ãËû8ï =ÐäRÕ Åäë²dâFK~Œœ¹ejλœwƒ&É/ rdåÈÊ‘•#wãÈ—Í ל,¸@u92s*}M! GŽ£HÎ話]åÈ;h¥räápäPû¼ëïy#ûí-(öWEæ£êV‘HÕ˜/í2¥Š ¨©1ðì;±¬ëëéâ¢Y–ÃÊ]‹çX¨O°Á×¶òmÙêÐG,—¡Zùçeå÷-Õïeæá™òWz8ÀKi6r]'ÜöÂË!0Q)þ ! èå»oÔµ9Bø¿MÕ±Žcsã±ØöÀv(H}foC]¶CH!Yº“‚'yŸ «÷°áiÎG&“aæH]»vT!<»DiI¹Ôѹ<3 ¸Èšþ ëÒ¡d~¡‡PèE×ôO Ú±­Ù¾%ÇH7sIA@A ‡<€½!Ž!Ô‘`ˆî¶€µrØ$ïåbšD„ã )c±jTŸG´?f „äï¶ŒjÍß‹BBz( z&²WúZS¤8B2ÁG®ÛšÊò$YJ.9B9)¹ìÊHN¤5ÕáK¨™Hš‰¤™Hš‰Ô-HÏ=L ŒAÅ2ë²ì+ã‚FR¶t_¸æ}¤Àoó}óõ«?´Àv•Ôhý`¢õx©úú˜ýe9ŒÐˆ¶rûVò7’œ-í[­L.Ì9¡vûÖ×P9²rdåÈÊ‘»qäØCb*2fBÅÊój£ÅÂ|È5ój±‘›±Ó+EVŠü„F*CC'ÀS 9&WècÕb‘D$°´o%Ÿ½DÆq„A3ä×P²2deÈÊ;1dg{`ÈL)¡`k÷o")H°ôoÛHÎè”#+G~J'•$‡$§g*rsÐ[Kë X)aåjV‹$ \ªYÑS!”‘}ýjÖöçl¯f%— ßµ?—Ö¹ %§j¯”ªú–þõê¼ÍбC÷Ìñ8ÃO=ÜVXÈ z2˜<ËçÑÁÐÛŸ³<¿^½Ÿ¾¾˜­óœ?‘_fa. 
s"rÖaR 8™Šv8 ø4$öÃô·ÉårÑæ\âñŒƒäÓ(åý|ÍJ/R¢,ʆ,jÌ&ë¾ ~Ë{~è:éí¢aý,â.á°`8Y°LÑzN>ÄT1–Ÿ”R°É»($Í‘†Ã4¦á°“ ‡>¹Ã„à¡ÞzHUOEŒ³œ]*“ܘLþL¥Ð=ÀZ ÿmúa½Y.4V¿Â6¥ÔxØ`âa™¤ÞaÈ’Zñ°>ê’ÏÎjq}Í!ÈlŠ› ^¦ £±ed Ê‘Am/yÛc>¼”s.ùÆ V½èÔ˦—] CöJž‘<›¥^wí ¼kæÎĺ†n¥Û|dmLÅÐYʬ «ú–Çlµs Í΋:]G;¹ž„¡c¥÷D8B–éQ\×ÒÑÙlÙhØ8–M¤Ž‡³—©þ–Þþœ[LÝúòE;7LQS?US¬>àÙL}·ê:|€D4Iú_›ÀT3§4KŠÑ¹" ÈNb€Ù‡òàø>–PK 4çAs4ç¡Sl›.-pÎØÌ¹Ä›®YUîÀdà" m{cGfD.”¸Ól?—²Ñ³?N®Þ¿8ûv¶¸ÐòGsoQOr&ÊMÏUc@¾ÁÒL©Œ ¡ò`é̺Êä!™²–LD‰z wîJŠöš-Ýúœ-œ7™€’­MŽ´È@½ícПD¸y­žQ:Œ¡ÅÊ "Q40Ä)»ÙÖXB.r9aÊZûS¶BH"ä‹zVQ9JÀîù0äàÌô—Î$›FN"ÑUADDùNDY¿”‘–ˆä“@”*#Éögmíîa¢o®uN#ô§gð=ÙPìÁ†"±ÏºâDW ® ÅÄ\DÙÌæÅ„˜I>d¹¾ myÔv Ѻrm:ôT­³ƒu¦>¬3ˆJóIŠ•78´ŽDTÞÖÒ8[XçË£?‚q¶>i«m"xç›kƒSª¬¶¹÷?8Ù3o_6+‰è™gùº“Ë>&Ec2‘1’òéŸ,×󥹚®×“Ëü^O jŸ<¦ ŸÃIÎIAO˜=`äÝ¢ðZ=Ö…¬[YhR²8Î6 Ý<°ðý̪O²¾ûFaê0µM'¥†ˆRس²VÜnBòÁFW™[}—uœßÿÈ6jѵ¢Õ㺩€5DÀr½Ð*t)ŠFøP›V5²œIY Ç`¼óPâ4èàþðóÏ?*P…Vµë¤¢ÔÉ¡Ô^}=õW¶èå4ÑJÎÕ ,Ûp+Jæ-¿#B dqè:Lî$¦(ô²Š»”:€± “GË™²…Š¥`“‹.$¦(^Zê ¥Zêp’¥Þ÷´¶Ìx„P=$XdYã‘ØJ"{ÙÐÊgù~r¶ž®ÞÏÎušÂ‘bƒ­Ê©$ö¤Iìö"]Ï=°WÄÌF˜ $›ê²WŒŽŠ$²%+‚°P>×µöáD¸kk¸wõ„d}H}òà]]îêdRµ–§hÃ1¸ëí ”»*wUîº+w ½p×€ÅBô\›»ÞÊʈÉco,…àå}v—áô¹ë¿ý¶™®ùGVþzLþÚ® Ê_Où¨hKF¯=̱ý™f:»«;ÇÚEéÝæ2'òTÖÛX$3v%aû̱nÎöK}JeØOçÁ5Z­òy–§=Öx®§~RßÍ&WËLX[Jê£#¬'ðo¡jû8ià&%èÒ~É£KÙk„Rì" x„V/K¸Û -²d£%`Œ‰²YÑQ%ÃéÎËé!‹:ªê¨ž¢£Ê¶ÖÇÖ9‰b»º]Deü*‡Ÿ Áñdþi*‡íaNêùnÙ;]/µTu÷t«Zj#©Á4’ò÷èu<ÒzòGlØaÕ$‘ä"¸0Èì GC‰B~rèüòfˆŒ–穦FíN.j÷íl}¾Ì8õÏé|"(—Õæ»Éfòf²nË䃫S0ë£ #Ê{¦åš%ùEûInŒÎäM:K–)GaÈ¢{XÁ]úL…)I‚&DòI’O«ž<ßõçST‡^zuèOÒ¡?¸âGz‚zˆ‚až¡æ.ð»¤Ò-)ŽÅgcr"9tžpLòúõUÂZ°nÓG%¬¬Hdê!àhSâ%ƒŽ©®oM’IÑ0A9f>€Hv±ëHŽcâÓ·ËÅÅõùFãG‰7¶k¥Æ‡]‘Ⱦ‡ƒrϘF.J[+ª{PîáF ÉA9`ö²%qÐç䇯ànçäyýlâ’óSvvr«½áä£ûý  ÝêV«[}’n5÷@[ÄÔ8·¶vvä$ï„¶fp!yŸÑ3ª[­„õ1}T·z0ä™›ÖÎ }š‡>I¥ØÄe·Ú¡«Ë6’06‰¤^2ˆÈKKtv"éÁK¨YÏ” +AîF{)rÎ D‹u 2b(çLÒ”Uré&–%gô J• ?¦ÑA'@û¨´â šÈY#“¯KI“‚œßòÈ£¡ØµYÚ‰ä×P ²d%ÈJ;äÐG¥ÉTJ™¶†Ê‰! ä=¤$Ljll "7c§¦=(IÞA'•$‡$§ç'É¡’+ëbvêCvת¶ú”`k¼‘ä IæljB’v§y$¹%T’¬$YI²’än$ûˆ"'r#oÅÄu«ÀqêCŽJ;’<õ,"‚Òd¥ÉOk¥ÒäÁÐäLQkÐä@ñà’.Dƒ! 
RY´•ëz9ø9 ôQêzcÄ:€àPÑõÓùÛéÅõ|ª˜X·)¥Bâ° &õІ@¦¦–S(ª¨É¤ÔHò 3^eg,Š+ïb ƒîCpøª+¯®¼ºòêÊwc­¾Ö*K‹ƒl͈.‚A‰àfÚ“c$C¶á&L•¶*m}Z+•·Ç•§8ñâhrÖÀ渆CªëÖÝH‚ÆÈÆzlN¼ÀÅAÓä×Pi²Òd¥ÉJ“»ÑäƒËçÊlnÎÀǺ4™ KB…&Go¡œxqÔè®Òä´Riòph2×>ñÚ2‹*ôWLæÅ®•½»Ï¢JIšìH n™EŽ„R"uåÆ{Í¢j}Îm³¨ I[ÓQTŸÙ(ªû•N{M¢ª´½ÍЯ Æ{I¾¬Ÿï“%Yñ“aF7ξ£÷®ìp.úTÛз=g»SKà›K‘ÕÒ?/K¿o¬~/KçgJb‰¶$®TiÛªã%³ŸbiNlʆŽ&@(…Àäyý“¾ûFý›cd¯´j£ú7Ã)|ÁÊó…¶°ž=°ŠÒ¸¨~¯Œ,©Ô„€‰1Ea=‰bé•Á݈}XOûsn›¼r)rð'Äzv‰Ö’ª“qö"Tφ!ØŸò^,[z=TܘùTòT¦û@þƒªl¡M3†ÎÉi;cÈÖçlßÔ¸CÔ•é)†ütÊèIà÷AŽ"®ðK‹ija+‡_Ц"ˆ<ΠB MœÕví ¾Wø¥õ9[@L&LQ¾(Z " "=T=Ù+§)R­®lr0ò”ÝŽÊEüÙm/‚\ R¼ÉÞãˆKñft£«ƒPš4¡Iš4¡©[ ß÷0€0RÊ–™]þCÝ>W.ФÒî0•I ¶‘œÁ“†PÂõ._¯áþ# !l×I ÷&ÜO ë?r ™¢Ð»ècåV°Ò°"˱±4‚µ2 1óc¶4ìF°‡.ŸÒc¥ÇJ•w£Ç¡‡ÔVt,L$X®Þ¶h¦ßr*o3t&åÆÊŸPH¥ÆÃ¡Æ|Ô8ö@#‡‘/ÞVG(‚ÐÛÒ–0;‡,1eæA“ãƒPÙ±²ceÇÊŽ»±ãÔ;ft# P¬ÝþUê¥.Ãq3#¡Èu@Ù±²ã'RÙñpØq|¦ò¸d{k‡&†À•ë`ƒ/=a"²D lŠ\¸$S:Blëc¶_ ò~RéYšCU!‡j¯ªú†þõê¼ÍÎaPÃS}–ÎY¦8†” ܇²Á¥t¬r÷§ëAú ‚ç׫÷Ó׳u^€óGÈñË,ËdNDÎ:LЧSΧŸFÂ~˜þ6¹\.ZÜŸtpQÇK0ŽØÁÈES³P–C–9ˆœàÇœßIÉ9åÝÞ7þÕÃêíþ †“Ë­çäCLÃ_ù§I)›¼‹@iøKÃ_þ:ÅðW:|Ò‡  AJ¡êˆLoÈa‹aL³[Ux¡óv=“ÿ6ý°Þ,«ß+a›Rj l01°t¯ºè¡R+ÖGMQ˜§±jã.õÍM³„âgÉ¡öÞU/QÜö˜/eãl@×$­W‚Sçz ÎõóˆïD‚“6Ùi«™xí ¹Ò„ ÅD³ §ðõ®l{̇—fWßÉ•ÑcP Q 9 †€õÏ"}ÔnDà$õfȱ*†€¥” Ù8Cb”l£=i}Ì6 ÉA ò=ãIõ¤U 9m y,Lÿ|²c”ÿð–ÿÑ$ µ°!ªƒ±¢‰^¸GÆÌ ÆŽ sh¿$ò|ÄVPCýê×P¿†ú»…ú®vpÎd»+³µ#Ùš…`Œ÷$$OcG†$Ôï­ P öý\ò7Îþ8¹z7þâìÛÙâBGÀÔûoUP û'ìžËÙîe2„‹Xšyz_y2•”8g0YcVx+Ù01ãf×™{µ&l}ÎÖ›2MÌÎCT[ýíãÄýŸ E²ÒG«uL^‚Y1ÚÊNË@0Ù{ËÎvVÂh‰8¡Õzës¶ÂH’ïÙµïªbˆÆì†ƒ!}dçs¤›š“ª;©ÂñRvƒ&‚‹¥ 'sƒRÑݵè>Ùùí¹-‘K ½Î¨RS¯?ŽîÓèüw³ÉÕrqј‹}ób©=áT¹EH‘Š ×4¢pžâˆ!îzžv*(]ÁÝQ%-cLd¹fx>£cºórž×ð¼†çO1<Öõ@ú|`nº…º}Ú"Y'½9É£t¢p€QäBJþô£ó?ä»­ÞÏÖKÉ£E«RjH~0!y!¨uGŽmÅÄÃSñ³»ç¬9Á([IÒþK²h½ñH(’½µ|ú¨øéõfµ\($V‡Äm:ù9`¢&ö—`'é}}ÔÑ»eFaªZ4TJé ¹‡H®Ó³÷#ïòwè*údÊé^DM³S?^ýxõã;rVޗ3+Å’U+ê‰åL„’ñI ê­‹®ÈÍÛ€WƪŒõ T'~@Nüá(ÍûùäÃtõÕÍ›üßåjrõÕÿz);þnova-13.1.4/doc/source/image_src/PowerStates.odp0000664000567000056710000005263413064447140022703 0ustar jenkinsjenkins00000000000000PK¤@3&¬¨//mimetypeapplication/vnd.oasis.opendocument.presentationPK¤@Configurations2/statusbar/PK¤@'Configurations2/accelerator/current.xmlPKPK¤@Configurations2/floater/PK¤@Configurations2/popupmenu/PK¤@Configurations2/progressbar/PK¤@Configurations2/toolpanel/PK¤@Configurations2/menubar/PK¤@Configurations2/toolbar/PK¤@Configurations2/images/Bitmaps/PK¤@ content.xmlí]ërä6vþŸ§`õ:©¤Ê¤ˆ oÊ̸Öv¶v«f¶œñÆÎŸ)ª›-q‡Mö’liä—Èϼ_ž$ï—&¨fKl-]e š@ðǃsÀ7ß}Ýʽ'~¾]!M_)^¸Ž6~xûvõó§?¨öê»wÿô&Únýµw½‰Ö‡¦ê: SøWÒar-î¾]âð:r?¹Ý—\§ëëhï…y©ëjîkþ,q%IéâÈfyÔjñm$[øk¨ÛPßíÝÔo´âkà‡_Þ®îÒt}uõðð =-Šo¯ã8WünÑàu‘oˆžk³¾ò=,¹BºÊóî¼Ô•mË[mRxØÝx±44nê¶zu{ d×eÄ”«¨Z¦Æ¯û[ivÝßöÀ¼¾scižñÌuª-ž›_VÝÄwþ›Ì—x_³Ûòï×’ÞÏ÷~Å5VâíÊéPŽ4‰×æÍîö®ÃÒ.^ÜÏ*®%Ÿ8Œ«À=dÃé& 6½¸‰"•›¢T~»»lK_é"//ZKÀ> ÝбáÔy³ä 5œZÞŽó¾_þ”q÷Qòïïý/æÒ\ù膉¸X{7‘_½õB/ö×oWq´sÃZ޽Ÿ®a ºwcŸ…¬šPÚ%Z—B#U˜¼Bz!fæöÓ2 EsþË ÿóà†¿úÊGÊ=°y Éc’z»vsóûe£eeHöœ¢·3£;?Uþ臉¶e{[WäèoßS¥wË]ô,ÃÉÕÆ–©ÝÆ¡§z^Ë3öyÏ7û~ji5¬eókfK ’jæó 'º§ù§O-Õr4¹/ZCüÔÒG¼ÿ¥+zŸZŠÞ˜Î6}íSK_›©d[Ô®9I¶–Ú5†ÜÚò-WjãkHe†À»÷‚L8Ý ƒ"n²ëPÿJü·TÕùvõÿû?Eó*•TÉË0MàÞÀÂe /Üxø˜ºñÇÇˆŠ CâA]á&zPyÎÌ1T8ñ*ÀPãŸ9.ÇÞtø’½»†J½m{9¯!2Bbñëõ!D'DÈ~•“!„5ú*2'Cˆ¼J|¬éðy¥rÚž !úJå´3!B¯SN³ž‰ 2.KPWngºöUï^ƒìÆM´y,~TIß½áKœ,^],vfK«ð­òµôÇbá”oQÈÖxf³‰@÷ê¢ê*«r}H 1jrçî½vE<|½X®-Îþ”ßñ7oWþg¿÷‘ÓÃ?Ñ!kúYa±¸Ì®˱šÅãøÅ¯|éœæ?YŸbÉŒnûŒõF¼ûmÓ ìýñœ:Æ+óò{áÛK´/ÚyiüÈŸÎ6Å|A“tEW02õìoŽÆãꌽuꆷE\QÕžoBû °Âï³âµj”ò_]ùoåÏœN­þì"ÜÛE¸ÖEH¢‹p³‹*—¸“C3ÍJ™š¡“Áz“æ°}ÿÜÿý:õïó~aÙÞMÑG·ÁÁS÷‘¦ c–ÍÑ&È$⮈kü²)±² ¢¦ì ÿ›]áwÅV¨Š: ”€§”µWŠ×(ã¿OzósþŒÖ_h¦yN¢ÔÇ2™€(T£†Ó`ÊðX>Ê cýDçv»Pe.T¡§SÅž\¤Qþ#Ž£x¡ÉÉ4‰Â&¹(îâäHƒ•V¯ñ}ò÷ñ¸,«$*Y r`>óF‘.à"R-].Í®NsârÙgÜ«’´)³cQ%°y‡°˜˜ìɺJˆeª#]E"EáVAÌ·¤çÍHœ&jøi¨‘§£f@+j8GêºÃPËR€žjÎtLëC¦¸X…±‚˜Ó ™‚,GÐL¤$i6‘7&˜ík N%ÿ?ø;[eãs™í­Ó©‚Á œž*ñ’õÁÛ,Ly9¦ÔmM{¡bihz[ócí÷ SÎÏYÇ‘)Áª£A×-ÝKÒ®£/Ô!ôLªÖ5{@WTJU2j—ÕK³T–°n7”rè)”ržš™R>lÉÈb6Ò©`Ö2dlÑLÃä©ya6¬’ËBF{!Ó{ 
­«’ÆüÄD£Aifß”˜ÐÆH¶œ|$óÔ¼°”1¯eÁi]K€Ù¶ºÌæ6æfq#[ÑÒ~*Z6nPIHA=ž:·É]׎nÇhǦ¦Oí_¯¯¥5TãSj­©Y‹Êýâ*7Ò%øg kHç¶4Ãn(݃ KÆÓ¢s÷ K0z&[³)ªJK¤9¢[º$¦3næá'°dBÓBµÆVic>LjÈLzºÌ”&­Ìò5-=}¤ÍmFPöQ¼ö^w„ÁlÈ ÓšnO@îNu¿ ÐM¡IzໄbÅSÏLúá°¤ÙzKW謩 jf¤òãV"kÒhy\€žöB߈–™" ¨‚nóà¦ÓT)˜>ýßzT©Sêý«ûÛÇEE»Øs[³Z˘X¯ë ù¨émê4ºPß:gEn(›5 ÐÓ-¢"f´" S–¢ºcÎÈNmü Ôæzú(Pûíþ§ƒÊ ›5,•Ãf " Zóµå‘:™¨ýŽ©'c*Êß);NI&DwpWö¤ÑG`qÂÄ뉺 ± qÿÞq#ª=ì˜ÕëøÃÝK¨}£«ºõGoL¦Š,ƒÀ4€E¦{>"k¸‘áp†W´ E`Çgáöó‡W`™ðló‡jÈiŪ/°ü…[?Þ-öÏ3éê†FöŒý„°ŒÎ£Ñ363€œÂræg •#1©ZJaÚ2*á ŸqT‘¤ºíÌÕ! ¨)“ÔýQ°cÏA’ØRЄSâXÌO( ©%ôõm uÁiôÂ9òF5]oˉ5£ŠL®o0ùš¬Qü™7Ñ×ôJÊäñßŇ6Ê_?LÜNyü]ê%Ýõ^5^ãªÄú¼¸W|¬ý¸£ îÐ \ç˜ù?%Iï¹9§ÔûC´Û¹aOÅ3Ç\G õ#‹ïX`"ì†1RÄ€7¦çzEÊ|r“/s“3Cš¸ÙÛ2Œ-Å»q,©m‹Dwž‡±z÷3ó(;Ï {09¥ò0RRèD%a˜O_½Ÿ( i°/=»¢N©}ãmýÐÛhS»Rª>æŽàn½ø[·Y¢¨,™årÜX]»APª[»hão}/†ªT ÿQÍ$T7lRØ`Y„­ÀŠL—€†ÝC ‡µôæaÉ÷Êw[ ÿ3·ŽíXìÂ&Ñá…ÿ ö”!]ùUä¯ÈJ™ð¿%Êdn^N¤¿ÛÚð¿“•×~)D쫱Œu”çÕÂ=Ö4D²‹¼¿få³X#‘‘•-^®±æ!‹[pyþý >Íʼn¾ßŸ2â`þvõ®¨ÜuÄ ¿þr¨UI•ÃÍrþö_¡¡¶ò-<ø[ýߎ—&ý¥‘ò­‰í¡ hg`JH=Þè/ GÁC5˜Ý5˜âù/`õWMà<¨Áî®ÁMy §¿É6 íx‹FHUÑb`Y…dg n2:¢ÝŒäUÈ1uS’˜òýºyI2R ï&%±¥I‰ºYIlÉtQ’=ZEßÂèv”¡òÝ„DYU€[tto¨A,Ø"¡(ˆ ¶¨íÉ ‘2P´‹rZ˜>º›p€‡ ªª E7&ûŬ¼`ô3ø£-™Gw­xy{ðå[T㈫ý¢=b·žè­'ÝROWDÓ%*hϺºò¤GÒñN'DæÑ=S¯4kHœM0djèsòÜtñ<±Ë÷²O¶ÿ:]Ñ€AAGº)(OÚ¥î éÒôŠ" ]o‚Ì8iìYŸÕÂëþ•ë-¢{a¡ú•¡ë0ûµm{nö8×jù‡„Ò»Ãî&tý ËL©GDsÌöB®ÑÒAþ•}UÍ®:b°†p $>hÍ`ÏÌj¯±Ü$_G* ®Z¶j»÷ñðú Œ´ÚÑbͰj¦tíþºY±®FrÈ·ö»öî¢`ÃÂ?@Õðƒ\5üWí^Ìs0*}W%ûKªzwOöõ(Ùñð°i8ÜhÕ¨UKC§y0–=ÛÁuv/MÌ:Md¾26D³æU?™&æòU¢gã‰#Ë“ >TÀvÈNÌ“å+3ûJ™àÛ'xR–,_(x>–Èž‡§ø˜…Ñ\_>~o9 î•Xà)¾ÐdkΤü[¢_þTg«N™+;÷ÿÕ¦³<̤b¾Z§~wÇâ[ÝÙç¼&_ñ|•ñ—VEõäßÇhn>#eDiËû(oX=ú×® Vxè ´0€æÒ¦¢…"Ò±HQ:«0²mOS!Úÿ‘§#JÍ¢¦•#ÊSóBëµ°K«œ^z¥šsБßê´ ¯3Il9"ìSìÐ(Õ¢ŽH`Gê Ì©Äó“j|WYw¬éÅóOî!™ÜÑ5«}ÜÒ®È >ÎÁŽ‚³kf¡îØ'ëû¼»p6 Þ£G4¬·BNQóC.õÓ´ÓGóÏáþòÇó<”-0ÜuΨlõ›uLÙRUˆ«"57Õ Ì;¢#OÑA뤨rDyjnˆS¶:VÙ’ù¶[KÙR)Û’Ê–HH*[Ï·mșґçäÊ@ŠL§ )²2Hyb^:®è<¨Tz P-ͬoÅÂZï Xxäw‹ˆ> ¨ T,Ö(]%”Ú*ÌEìÜaC)j82{§Z{˜b-Sלš5`eÖÁ‰+‡dÏpœÞµ¼¨¯3R_evl;Cê+HR:¹þšð²õשºIÆ­K§ÝM¨¼(d…NQ½£Ð‰G‚p¿nrØ]¸¥1UOÉ1Éô”¥ÕÎ#ÅÄ;j}¸ðŽš‡·x´3Ö[,c¶¼Åsk2g1KÌË|iâi•XOP¾©´bØf·b(³AŸƒi!®hj0D™¶ò»?#8‹“àÄO;%ª Y9fÖü Ã5·9*5å6Ðkh5Ôö`¿›§›† C“˜bqD‰­ÚC”š»ˆðyŒ¨áS³îçËçéÅøÞ½¿öÎPñÎÝï6Ÿu~èºjÌÇÝ£ê)FÇæ‡Z8®^œu4ѱ½ýçÎõ gæåÎ5 Ø–¤?²·L¯jŠ˜špî<„ =Î*sû‰Ñ¹Ã<á2×’‘¹¤Î•\Ÿ˜ÊóÄhs.9{~owí¨mQ=ż¸»ç» l“ÁxO2^°leròÐߢ/¾°¥ÉóG»êdà±x’Å'å&ˈ¡õÛÂú‡åðŽÀ+[8L­&ˆ§:>Upr/\7<ýg§uÂCûÎNÓ°™Ð$•mË5œSO™¹*l, ÜhЪ¾±0P;±+°ÅU1‚QgÃó47ˆj qp\yÔt7N¹RÙ‰®áéЀ{dÀô"æÉòy¸øú{{i kRè.:$žzï'þM€lÝ áè]EÛ­¿ö®«™Ë«7Ñæ±üµ‰Ö‡dR`)üûîÿPKˆ!ˆ,QÒPK¤@ styles.xmlí]K㸾çWZìÞ$ëá÷L÷b²‹`˜^L0“9ÒmsFJjwÏ)ÿ ‡òÿòK‡$SÖÃ’­¶ÝnÍ‹6Y«>‹Åª2ýþç'Ï‚·Ê€Z€“çÏøéNÑúÀÔ–.Úéö Ú¦¬mcêß•áý{a»¸±›ìŒѶTUžï”5ÁÙJJ›|VB% ¢;){|ü RÕr15ž?ÌtöŸ"x\!×Íz윮ÆÊ`…[ú(Šƒ@ÈÇ*û¼c*ðÉrSñ.¶ª Žp¶y#bA Ü`”„,ˆ};ŠùàϾSBä.LûéöÕ%€nA”dGi[»tÇS=ìÐÇ»D–i× SùdŠÊÜ þÆw;VÀ aÝv)¨8©TÃj±2r&WAÚ8„*ݼUùä ’‰!‡‘7¦¯ë'àðÝGD÷0.øà3$h%Zs"ˆêú”€š&‚=àç(ÙÔx?‚˜÷°›+Dß)*æ(ˆx› üu Ö´ ú¼ÁƱÊÌ/ʦ¤êü”Ó_áWð÷xð™.—ƒL¦#Ãç0‚^‘×´DZDÁ˜N ë‰$ܧ}ß7iO"FÚñËe¬1[ê§£ÄÉÆV ”QT‹”‘” •õnоXY×ï(;õÌ™‚T/e» NßÄQÚ[ n„¹ ÏÁ.rÙ‚l‘Ã\Ýö”ƒ†EBÊ+õX²šÉ†ûmºX™c'VhŽ€®äšá¬·l03k™Õ†Ž÷°• ¨¥Q7˜ ïôå—Ú*´¦zõ5#´zæ+';½¨Ôd0V s̘‘:–8Š˜sPÖçÂUÄØï h½‘zÄ Ø‡;0Èqز•Uºç„0RŸòhä;ŸK;÷ÞÒý{~hqQ˜ß[ötH&s©oã bu».Œ¢“µÓ-VE—Ê|Õ;åÿýw¦pÒC$ãc<ä«.XÒžì]O¨Ã*ë»oZ?Ó7øùÙ[bWic™…µ䫬NÒ8˜'àÀ7Mu ©{Sn!«3„ ¡ç7‡Ð¨C„f7‰Ð¸3„Lmt“M:CȺI|¦Ýás£vzÖB£µÓóºM;mèA4~]†ZêNNlêƒWm\…ÎMO;k&°8E$Gˆ¤1=Aä[Å)%ß–P’fÙ@1ÞÐõy+Gñø‰ ­Ä´nÒ@GlÂìœ&uqF¢ Áñz£&YûB(MNóGDPŒYAm)OÄî¥ô &x•Š)|DŸÊ¤Ù&˜§ÏKÖŒè9V¥ÇZŸ‡«d+c4ÿ€þ_càÿ Mðà7ˆÎ« É|$½yé$ÁkÊ#7ñEƒß¨’5®ÛÈM…ˆYµ$\? 
l€P4Ywøp]W%J…ËtªŒ Y‚»å(‘ðò+´£-Ѝ™qëÒRÅ$ËÚª…ÈÁ©a&ÍWDštþ¯$’”…ÛkbPÓú”Xv•!(KXĶh Sñ’p&¶è…ˆ«ô‹DŽœÚ}{{Ñ醈ò•Ò˜u¶–Π÷bùžÄå;Ï—ä´ÞÕJ6Ò 5 ­¥bׂ•1ìóIWå’‰LÜZLáf]¯-üμۘ:ã á@‘´ _©"F­™‰d\B¦Ê¤‡.§gζ]4_Ebnú:Ìö¯Ã|ñ=(óô]6'÷V+۞ϯë4Yäb–ýã¥fT0³Ž’÷ÛéV«ÍdÃüáË[–Ð'hŽÌ*8M£9FíFŠÑELÕå@ÊïÓóø»N{è^Ȭ\-t£’ÈŠè"·OEÔƒ ŒÉ9ÖkñÐÙÉѲ¬>¡t\EUƒônS“N zì£(9—6}GfðkRW²ö·5Á±_p»å Ø—ò¶DkiìIÄ94Žài uzòùµfgõ@&@´ÞfÔÚÔû¨õá Cýæ£Ö"ÞlÔ:5[Éj-8³Z.ÑÀ œUºÃl³üYZ\\WUW`óLÕuûUeµõó+K£žXw÷¯ÿS35¾„¦×ZwgÖW+Ì^«•×Ý£CWSñÒ±uWw÷ÊÊ9.Pw7¹Ñº²î*ï¦7Z5Õ]åÝü&ñé®îÎЯÅP_oáa¼.SÝ}å]m9÷ßd¼Á2ûxÃÁxƒé¦ã "¾•xC!”‹7T§-JâX£Q‹\a¡ºQŽ“•vîbL³cr ‰¨V7ˆ™Ý fµÌ®V"VÚ™ÿ𱈺AÌê1³b…Xdm,¶Ç<±q7ˆºA¬MÿRˆMºAlüv›vƒØäí 6ë±éÛAlÞ b³7€X/KkI;Ïÿd%l¤½+ðã¸Mó@»/žº¦Óù‰ þ"…þ"…þ"…kLèô)ô)ô)ô)ô)\y>ç•eÞ/s‘ü”¯®4@CØ'~úÄOŸøy-‰Ÿ×eèC }ˆ¡1ô!†>Äp½õ!†>ÄЇúCbxõ!†’¸Á› ŒÊî èCùÁ¨P1vs!‚ o$D ŸùÕ€ý¬‡ žéBÍÄ?|Ô¿˜¬Æ'÷KbK æ»TѹqgÁ²Ô&Ä ¼Çõ‚ÕDhúx–ÞˆøÌŽ Ó‘•~NL«¡kãñ$mL¿œªêši%Uާñ`PÃnŒ.ÌÄ>Ô§3ÌKq &ö <ÍŒKqÃúõD ö~n+ùȾ‰Í~ÉËVÓŽt{._‹Ÿt¥„æˆR¨|s“”Ÿ/AÄ4´ñ\nOA1§Ú\À™‚¨ Âí~µýTH®Ê!uã:˜•JoÔŠîRå mÀzÙ‹ÁÐ'(Ü üv&W³ ¹±´ÝÝ+“89KL–:úOÊWáÛ­øÊ­Q…tý>«ìjªÝ]¶¥$+Œ£$|ªô77Ÿ³”Ðõ>¿h¯Õu3kr¹‹ÇJ¯çcÿÜñÀÕ›êvvsºfͧ9#vXêËÝvHêÊœƒXö/ŽMPÔˆ–õ®ÅËŸ®EOF£yK,NýfÑõbÑ^/NýÎй±¸ôJúT\HiÈB9z‘¯ú«8º–wî.Îoõe§‡OEM/0Û$â}§ÝE†ç‘®¨»GI×îŠÜ3 ø¥ kŒµ‹©Y]ZüáãÙî]zÏ>SÞgÊûLùÕ ÔgÊûLyŸ)ï3å—E¨Ï”÷™òæ™òzgÛì+O{º÷§{º÷§¯¡ÞŸîýéÞŸîýéÞŸ¾zXYÙ’tx Œøo‰VžFrÁ3kbOÜ}&ቋ ÆPy¿ôã? hÒ Z%©Mq Ø­¡H~쉕Ûqªá¾ð¤ÜHBç몊…Ô\Ù]–Üïg¥@i¢ï9KòŠŒ„i>&ÚÄÞÒÈì8,ÃB®wškæ<«‰JSxgð²Â)©fŠzVr‹ìy'6tm4³.2óL[ÆÙf¦g€ùøbˆïÏ~fØ Óïc¿"T÷KÖ¯Ïɲٹ´:Ï/·áÓšÌ÷ùdIí‰Ä¨.ñÈÿÎ-wÛa(~w’t‰r¾–ø)Ùl‚dKØc4é¥;…_BðÅØ¯ÍsdÖ›KÿRSµ›y¸Ãð<ÍÎñ4LÍ4›`šUpµÕl k6ÍK k^JS͉65ÐeЊB»—UW1Ç™A}yu­V*Oì\kÅðÝ÷?¹Ñ;ñçOëè]Š­DpôÃ2'õ{'MQö­þ:¯Æ(÷j %÷¶sʤ{w4ÇæX3æ…—lic=÷’5¹X\×fÖ´ê 'TÕ§'n÷Þ»V+øA‘w¿ÖBê‘6±ŠªmjÆtV)öH›‹šù2±.Î!¸(·ìÄlÒµjZÅ>U£`Ì5ƒ»!ÝíIímgg{Ò‘ [m@ži³éøÈsm<4†¹ýþd]h:àVûT#-6uMŸŽ#üê7ªœPü§akÏÔåGšî÷ÃÒæ“YáeÑ£ÕTÞp,m–?jf­O±w(:Èwò[¹-øžjæÔ(Ù1ÆSSÖ2ÍÏe%³Òaeœ'\œe˸˜§}gÂ#qíφ lõgÄ‹€ÛŸO=+÷àì)wÙƒ½$AÚì`;ö²oÈ„÷ÿPKéÿ^Ûà ©­PK¤@~UüÄ££meta.xml 2012-05-02T10:10:082012-05-03T21:24:43.07PT26H29M35S13OpenOffice.org/3.2$Win32 OpenOffice.org_project/320m18$Build-95022012-05-02T11:37:56PK¤@Thumbnails/thumbnail.pngÍxõ_T_ÔîÌ0È0ÔÐÒ)ÒJ7(]Ò(]"- "ˆ€t  t‚4*èÀHw# Ò5 å;¾ßû˽Áýå|öÚg×Y{­çyΊÓÕV#ƒ2B™†º²>Ä7]þi÷î®0@ÿ^CYÁ0 g7W!vÅ8ë§Ü÷œz쀴­ˆÛ³8¶TäwRLu¼ìcÆ,4*xs¨L—‹DÄGÞßzîDôÐþGÌçOèð৤2Rç÷ükÇGµ6ô§b`÷)g~qq!ùFZï}WLŒÇ¶ó>M³4÷Óì½?;Ê7œ¸üÓ^2‚Äò¤³©›áì÷á<|®ñ@’ah øüz 2C ¶Ñ‘ªòäÓYÁÔÃ%Ñ:9gM ‡çLRG_ŠŸèÐv?8ž°<}nõoBs!©Å¼nöÈ—I`s¿¹Ken•@U—(X`.5?Îç…{¶mø8–ÇsThÊæL´ÂLTʇWUÙR;àcÛWcšózQ†ŒƒË¨ùvýþ®!_mKÿBèçßÜ)Ð]סñîm."{˜Jm Ýõ ’âd|„ºøv׳àô3Ç#‡Ò—b#F“¾:úð¢azÍNç1;£¼”ÁŸSûÛ œÔf$ŸÐ)B3¥#zt9A‚»O7J"9[-Ïò*,ÿÈ›[ÒÙŸBBô¾sÉ_¸!Í£Óô]’­T à¯cÒ^›6+¦ªÍ€ë8kz⹜vt¿Qv›Þóçý}{Ç5fЍðO|šZ£ê&þÐÞxè<]Óa"O |ât½ Ã*:6°˜~¾îCÂ0éßÍÞÓž’v´_%ÚÖÛœöHÇá¹d_Ý=A™ ­Ôˆ>´If鼆ÃZÐE'µÒ²ÿÊ ü~Öè©Ø¡øZ–Bä²cŠ^kp’MòÁ¡3Ÿéëà¨þG½† K1o)% Ÿ˜–)ŸÆ‚‡£Ù f@ø÷;¤«1úYÜ™ˆ—HVðÍNž“¡ÚaÔVÂŒîûã;ÍývËLVÁDo}Ï8ê{çÂT±Ñ%Hó, `†‘ø½þ;}nQ`8­yVg3EÛÓ}òä@þÉ„_íÄÉ/Ô#€×5ÍÒ<β¦r$*GŠç~A7€}bv"'ÉÉ:Õkcãþ¦sjótéîJáo±Ÿ:·H”®¾m^4d™ \k|ÝEgäóCøÏõÍpÊhp‘¸ÌRh@{,Ž=Otç®7Ò!û˜l-ùÛQõÓ–ðE§¡ã£Ö,± ˆ=å#9žˆl§ªƒÍŒ£“R‹ÒLY?çæ`ûmnŒ8-TÖñ{!j²"QÆuíÕ-Ì…3³ßšwÝxZ:Qal¦èÁ>`XQèJb§£‡vÓ^M|/·“¥ “û-‹»•¾ÄÕ0²'èZ-P9ßd$L°c^¨TÖÒ®zÃP x¥4(Ð Eí(–Zçfþ ×륥ºUÙY]ëû‹í2ÍI¿4•Îã;¥”ÐSÃê èÞt»¤Zûá‰P‡Ö2˜ø³ó¯ëŽ(–άv¡-›ïŸv£b{ØUŠl˜€ãÇj†âzdÕnTÐÜŒ4òÝ9­òµ'ˆòjóúšÖÛµ'-Š|“ûæÉ @¶²[„ˆbF@²‹.àáÚB{ÐM“ÿþÂü¤“°–3gï”b­§8UÛ„3³Ï—Íš¼_éÔ "„IiµvD˜¨oƒw+ü¶Ô¬pªÃK“r¬dg&·øNìíŽÎ“Öˆ1âW““¹bí/Ã6ÔzÜ$4ò‘àkrW1Xm§tåx—êp8ø1âv"ÁèKÚƒŸC/¤ý×n_pj·Ëþ¥73˜¬)ݦZdäC”Nmž°ÂŠ®ž:€¯‡3"8÷»ådeêØ_6Bß‘A¹°B?¨„ð¢;Ü}.ü§´T€äȱ6³Tóƒg /ƒ¼ŠüjE›¥Gž´2€¶®ŠsáØ¼M"é#"qr¶ )!@7%ºÊ™l–‡fØïщ¸‡kêÞl÷Fz¿åï VÛmzìïþÅ“à~8kƒìt)ëhûû·Þö3jePÂBšäH{J—ñŠì‘b ƇMhÀùå[<@ø];:âõJÄzÁÖÜ2ê$gWÙ»DçgnëÞåÚÆ'ÓéN=€*2“WõçG©Am¹]—Xß<rYÖ7Ùëê¡ý°¿(Žž»/fæî2e·ÎTXªÐÛÊí£ºÌë§´Hšú fx¶v6S#îzeÇ 
èý0Ù#äϹNQ+䑞;ï‹õE¥MÁÈEFƒy²½ýÞìÉ9¯ñÄæ=ãm\ÍjŽ©UáÄІÇÓuéµÊǽBèkªŒ>G1nÌV|öì¼Á¢°ÁçÃ>{ŧ#9GïCOóá€ß«+ØÎLàËóÍõô†Õ~ 3£µ´‡ÅÞí,þUe ( mJÚ7­AÜå0ZÒ‹Í~6/4©-ß6á’+¾ã¿´™Èù´>1ý` ÏAÅL*|Ø™ýv·¤šR;’y«Ðv½ô“}ø(á‡A‘úg¾N·TŸÔ¨» °‰ô6‰’•§l«³‹Î™ÖP+ù†tmŠë $\ÕŽFl¿ö6{e˜ÕåõN4u–º°®Îå<ìŽ@gI¼äcßxå“0 ɇï¢âÙjs¨¹ò%àÿ˜¸àܵ=o€úr¼ÔÝ vÇp¼ 0],äÓ\¬¯Ñïæ·kòRG$k ËëãµçLVToû­òv·L¦ø£I­±Ußʆ¬þ Ñz>`ý@®5oéù­£ÂF¼§p±ö˜¬ö»K5$Ùõ#8¦¾-˜–Î+ª\ÈX"XD%+ÏkýñyŽy™Ñ Z£ .—0Ío}:ïªýI–ùz[c#WÒÑD?…F^ÿÚÐ:°}[œk”£N4CʾÞe4hr¼ÙËèYÕkótM®€¯îkš¶g•0g9úÙ[ë7ó¬0+å—ËígÇÁÆ»Ÿ@}Uù ÜQ+ MÀ£{̯1Æ] LÛ¾j«gs”â= /í¡XßõWƒ4¶åÍ6¨aÅ8UÌ‘dݺ,b½Ü¥"»†ø&5É~Y5]A¦ìÊ·¥œmù…kÔßo ZúEÔ½~I¦¼÷û¿iC¿a&Ôf#?©ŸˆFìMˆ)éöÏëØËÍ®è6aKùñ3S™L äzÿ„(¬¤»äÍ©›dgq‹UP÷lëê§m¼`kúµ˜.€²~˜­¶{YD8Ö5³ÐYVxf¤æ— IKuø7U»Ÿ+š^T$;!pô„¤¥c©z'v’øöîòÛZ¼ðk³ðåüé<9|[§ƒ6‘Ð7ÄŸí:ƒx 4]ÚÄMZ=Rfúòxá‚<£×¯ÏÍo3fv'€!];Z‚/U4ÃJ^.9îzIÝ6d÷4aX1¾ñSd ‘/VIœDš*t¸°A(yzbϤ_a¨@[Eu¿ŠÞ%[.@߸=ÉÐ8Œ6A|gݾ@EžD—:óô·Õ?håË‚›ª[©½º£Ü@9‡´sCP ¯$ XJ©Î³TÅÏ6ççü¾Î&ÍyX•s4zFA¾F“£Å#3it¼MŸPJ Sä.|q©³;ŽÄ„K¨k˜£=EgSE‘ù?f×{ÊÙ *qN½˜Þù;Ÿp<>=’›µ¬Åa×:#jD<ÝõÌÝv u#t=ö Ca™ö9Ð<™´ÁV £‚+±0ÇVqÉ#Ÿhѯ9ë{׿Ói9ò?ŸÍr C7Ï鿦΢¹ÄH”ÐJu™~&8·xs¦Cß±eÂÇRb[Ú¹Gk(²LÝ·7$ÊÕ°ãhr‹õ/r¯Q?g´ö£7!øzYxÅŒ„Z™s^{a«ô&ÐÓÖ™UB†MS«üÇ1>‹«^ý.“˜Dÿ»¿¸«‚(^z{ÏöZ=– É.iÚ†*2H'ñÓn4ýTg¿*>7êOç-¬é×Y” “™y‹íR^…Ö ÙjUŽ¥¼Åɽ?Õ œæï…É/OIsÊ&Á5mÎÄÞ +ëÔËñº§'å»òÀnHƒÊ´ ‡‚C—ñÒ%Mr¶ ÿEÀLJ>´8ðà|‹°Ö`ÍåÁ%J¢Ü«†½ä‚äMb?"–ìߨ€ uéM(¥#^#…WÄy@ê›uÊ&±ç)¤ÀâÈ¥j˜ÂH¸‚é]F"ú@J6’ÙÏq€rxX"VÑŽ àBpƒLmI‘¾ò o>– )mþ€Ý{˜lì=ºPFUUCÊÒlª¹K._™ƒý‹~‘‚JëÀ«1Ju/H8Íáá@ªIN§õ€&CªMž¾Ú¢ª÷à-Ü¡¦ï®î[q!ž%œ¡÷»"—ž˜Šƒ,ÜH‘¤R­K8‘H7R[€XDAÕæÃ +z4¹gf–Œ3ö„åRNbÎg Ä G’Ôd6Ç*Ú5$ú›¨ ÇSÿ$‰ÛU‘)t9%¼Œˆ”…sÝ;¢ôGF/éÒþ÷KÈgœ¬@ëX‡0ø~‹¹Af,Ñ’H޲\–Ç0k·X„'ê›9àMOS”Ù1žý[z¥_#‡jqÓI~¢æ±hÚåð;’´=“Òí„× ¿ÞÝJÊÆ‰¤!-ŠÝ¬4ò Ê¿ð&9Þd éJÄGi@8hŠŸTlt,ßõå=ˆŸ7¢‹¨†Ù&€\¹0Köœ’2þkgåýëœKõГ K¬ÇP{§ï]|Ò SU t«Ywzè„8üy§ ˜ ¸Ð5¹â†2¶øš"³A^AßXí]|µZKPRòý»^¦ÈN¨y3¹L‹SÔg;YÒ¬l±[˜@ = Û Ì¬EŠÕ‹þáÊ•ÉË Ú¯“ìGœ¾è¢kM“ƒ:Ø#Z)¸ãWDƒÖ¸Û6âÓ^Á¤XÇ‘w!EÎ…ÁÅÊ¿¡¶›õ2Í阵÷n.v¸ 2W©ÚtÝ~ë÷âKÈþ¥Rw„Ê›«ö\™ÊCµ ™ý™S„ʼnñKÜñè“ þc«8àãÖãñ¹>‚4q¸Nûh>6ÞX,IËÇÔ0¹ŸÏ!d+’,ÿfšðPÏ cÎøÂÝS±Ý|ží&Zí|„ü¢hz§eŠûyîý›ÌušÒÆiž„QbnCÈ_h@Uo´˜^<–ØAÊ;¦¹g裧dgï žëw®•ÅÝŸ*l- /äñ|)©èp¹Ë*çý—ùr&ÌÿÖÕ÷Ç¿l×w/-ë¶ †0Ý¢$!_7›;¯Cò%óXhäÏuø¦§=öUie'ÙgÑù>:S„ËŸ×Hó'žðžÌˆš¸C lwoƒ¹T^_îý¤ˆœ¬éÚüUžÛðˆhü° 08šß)’re[ùjê¾i{u÷’ʦ¨è£úÞð4%(*øžœût(s™éÂy—O@bSÇo7`€qS-æ ÈŠÊæ…~%{ ÏÂIc³; ÃÓ(ןú(fã¾, /OõÃLœº4(ö À }Û+ɲéK|jÒg÷/ntï?©º ³ åüGxü*úˆOAm|¾…äõŸ€ÀµDi0§x*|*Tž2B‡è¸IñiYL]ïkX?r-°Â-,>¼Íßx’Ž»@Fš'œð?B!§ã>'Ñ%jäÁûXPtöEN¥alݺ¨»5œåikç«î¶v*÷Õ?H'¨.w/œ¬è³À¢Ý4ÛÜôÜ\3è+†>3³Ò,E©²47MÞWØÀ‚}=ýË5ˆZw½pÄ`übªétá[µ1ÒÒû\WÐi¨D¹³²9®ÍùÐiL¤¯Â´»ZñŠñÛg½ô!Ç¡'æ+݇hjõY¡¢œ•ìôáüQ§" øR]x5œò—2˜J\ë¼Åìáyåû¿…£­úQƒÓ¨•|ÂãWú²#ùÿØè›ê.;ÄS ?ûi€?áßKbRóŸ§ú)ol®v=–ßVSíË>dYÝq³É µºòn½ ¥ÖÄKUžÔ$7=¯Pì»NÑ)n¶çÊ);ÞÒlÞ[‚•æžP/º£Ÿ­ ÏäÍS¡¦ˆ,jdPþ£ñÄ]¼4P‡ÌÓÙÇwdïKþ«:rX˜§Âo] aìì3…l”a ØœM7=cDÉ_~¡»Þ3r¨ }\ÓM¸ƒ—q¼|ˆÜðF¾—ÿÔ-‡GÀŒijùUÈø¨)˜O­$7 ñÓÕ²ÞñtäÖ>(³Y4ë¨Ò&—ÉåV¤Üt­b&bXtŽ.aÒ—¬©_^Ù\Ù†.®«äÞçÃwå_ñ;)+4âû,2Ÿy}À0cy÷ßùìûd(Œ’i´S­ 5˜à}±>È{p¿³oÕ†=–{Ôa4¸xÜ´xÝÿ±YÚø¾ÝéÙ‡9ˆ×‘xw÷NZK§ÈïHL’Ädw7­ØåG¤ÊæbÕRÆL€Rj.kÛHk?Cg4lõ„ü¾V¿ ð¬~5»î¾ÇžµqÏ1I쿲p,Ø'Jú. dÉŽw!›"Þï7"Köe<8˜áÍuü5ñ~§û¸ò?¡ø¡þÿéÀ…*´Oªpék ü+Õk¨h+W+Ú„ÿPKêfÊPK¤@ settings.xmlí[[s£:~ß_‘ríÃ9µ•ñ-™‰S“œ6Ø$16ðåM€‚‰òr v~ý¶°“É88ãÁáì9»£ª€Ñ¥¿–ºÕÝjÈ×?V>=y$aä±àªRÿT«œÀfޏWC—N/*\ÿã+»¿÷lré0;ñIŸF$Ž¡KtÃèrÓ|UIÂà’áÈ‹.ì“è2¶/Ù’ÏÃ._÷¾ÌÀ65+ê‹«Ê<Ž——Õjš¦ŸÒæ'ºÕz«Õªf­Ï]—!‰€Ž3†|=æ5¬Í‚{Ï=”ʦ÷ëñŒ±¦ù€ÍÄ2ƵÚYuóüÜ;ò=z(ï{j3 <[ô»µÂçJ†÷}3ë WïóÞzá}+¬W*Ò¨\?ëó\Ý.ÎævêÅÄç:r²­æ<^UòòÑ#é‹öTòÆ}?Æô"¾($XgËÊsc¼^B£Ä•ëÓfýË×ê[:?EûŽÜǹÄŸkõ£©='žç‘o6gÍ£É÷ˆçÎsÙoÔ?Ÿ5¥êãå©8dEœ],’æ +ú®ᘤ²³Ãf‡  •k®ºzŽm×c‡¼Å%8¨\ßc‘cèK! 
v×ø#¨ËÑ(ÀKq”}äã09’ú»¤C× ¢ò@øõÎ H‰ëرBeÙ C[hïTŽ’ø&Q%¢)^—¨¼\qáG'Äî6-C8"õ|/À12ºÎ–í*bÄáò¬Rý¼V+:›·Xeˆh+;¼··KGäó™L„ëÊuõàrqUŒ—!ÈÿU¸¹cö‚8?f¥úcR C1X« *¬Í({ËâñÊ®%”„eºnªoÁ ï:Ê9 aïÜ8#BÁ¤‡Ï'|p˜°»#3]è3§ë%xîÖR–B¾Ãb›zöB'«Xt¼½>þ¿’´ç8p‰Æ6ç‡QÏ!Ñ„Kóe|Vоêpbw53³éõò™´Ââ7þº\wX’ÖÿH jÉÁ’‚ ·#²—{8¸Ô—oÄßáý(êÆï1~åw¹.NšÇÃÓ%ñIˆc–ˆÑ!Ë‚»|”#އ/Ó?aÓræ!GY@ÍaĀǃ{‹GxDN½„¸}Ƙ?r‚Ü^Nåë¬Ã¾æm"#¿CDâÃSV›Š$ÌÜöÏä®ÐrI×FDÂŽñÇû{Á‹a¢:WC»Û“Wùço 0ðû m]ø§ˆYÅP!Š 1„’a›ù<¯Ê³‚:@~h¸šï…gfíàh^Ó)ˆIîqBc€ÅùÑD½q~ð"îÏàI,´<Ç!Á‹øŽÏçÝA8›äœ'¶kE‚¢²O8þªÆ¨`t‚C¿Iàm©pXˆë<¹U˜‚8ïŽÿûʪþOeõCŸô#ünˆˆ6Gn±•ß—¢ ½'Ę~¼ÛêáØ.ngçEƒ§[(½&'ïžÁKîò|ÀØ‚’rÞ¼°½(#añãÝ7Gƒ@lTNæhC=Ä)ר²¸—¼ø½¨öø ä'0> ø$Jym“aäew>p ƒ$¦o­õðÕQÞyåt<‚îQR®"é^)üs7 Þ²°=/ ˰£ì°€îF•ƒWß"!Î>ÎM¬L“ !þvC¼œß/Ë eWL½xwyŽÉªfdI(Cÿ5å¯ßZ§g©S–žÂ ‘Ñ„Oó(ÈœÝ÷ŒÒž ²¯Qàð> ÙÉÅy­v‚ZõÚÉo ‰S.~? {Dâd÷höýk·šþ«Z›«²Øòg>}˜—Ôi ªí_¸ƒŽ‘öÛ‚0ÐÅTnϳ‰Ö´š7á­Šþke$!WoÌ|ºã"EEéÆNê,PŸ?CÏSg¢µñø )©0ÒLEs‹ÌñùÒöÍ'M\õ‘¸zœ6¤l’dùN„' ›ÔÍO4:“ZºÓ•j³Iá·Û4qºæÚèÝ<:ÝÖƒÕH‘ÝU¢~éÌi<;K #ZÝ”[Z§Š.&³î*BbkÇfÂiâq+A’04¨6R j Qy´º­µ>n-Àέ•îMjƒý4Dg=P}Ã#Ì«;O¦™ÚÐ×é)|Nêˆ`»c6™×Ìî|=ƒ9XÕuäçªÖ¼™ÛÝùrZן7î¶¢Bë¾ ôÁÆCj3ªw¥s«iÖ8=­g¦V×<».7jMêg´¶õúXZÏ4AµÙ>§ã•k’À:ʦÔRUSÓGæRšÔ4I[H¦.©œGe6V÷1v#Ãy‚±°nš:2h2ó/P?›s«1›Ü<Ù½›9QÑy¿çò¿¿+ª0~­ž¹YÃÎ4ÍÚ:ÂÂî.8ÆFN’Tž–NôAºàígʬÌÁðW5®/з >wXÐ|ÚÖ© Cj×µGóÒ T¤H-Áê®6õ)_3’žvc-n¢éX QG¸ŸuéÓt|ÃõHNjSeI|é¢i˜¢9BbBŸ¹Ù£)—ã݃øWqN§cMži€A¿³5‘”t66úØ|À=ý™›tƸR ÑÍö«¡>XSI” ðµß—ý~Drasÿ˃yû¼ñ‡_À£¾ *ØžÃvD÷wÛ'wÇÛ‡üÒɶðÅËó+¼Îš»=ñ~æß.‚çN›öìyx“þõž•_åWù;ÁÀ=Ã_w.wГܑWƒy=x@5¸×ûmð¡q]"~3q[îˆÛŒÌ>¾¶·êËÍù†StUðÓÍ~$ææß8A›²¨Œ|ÐËGhù‰ËíkáÏňKÇD ™¯IßÉÿÜçÙçÕ7ÿÓRÝ÷_O×ÿPKh&­H 75PK¤@META-INF/manifest.xmlµ•KjÃ0@÷9…ÑÞR›U1q-ôéyìôC3 Éí+’¸m(M±V–Ìè½ñH­6GkªDÔÞµì™?± œòvCË>¶ïõ Û¬++ ª¼ÎáuÚ²]ã%jlœ´€ ©ÆpWÉ‚£æk|3šÖ‹êîµ:ÆSu“A§eM§-“!­$å<ÅÁuüìâS0?Ï1ìF™|ÝròºOÆÔAÒ¾e‚‰‡r¹Oyó®×CŠg?.æLîd,ƒ—J<õQ¨ãX\äâ®"‚ÞxIP|H!ŸTýO^¹&ïMLüX™¢¹ƒk+@ñªÉÊ€E²¿w+Lnü9yÒ\MËáA9Á‘ÄØ þ×+~ç" àìX $gëkÛ}²;'µAA—!n˜>oa(_Ç×Ò®ÄÛxý PK’ð§ API Cell Folder 1 nova-cells nova-cells rabbit cell slots cell slots nova-api child cell Folder 2 rabbit nova-scheduler mysql - hoststate hoststate nova-cells nova-compute <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="40px" height="48px" viewBox="0 0 40 48" enable-background="new 0 0 40 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="655.0938" x2="409.4502" y2="655.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,37.613C8.787,37.613,0,35.738,0,33.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,35.738,30.464,37.613,19.625,37.613z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="649.0938" x2="409.4502" y2="649.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" 
style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,37.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,35.738,8.787,37.613,19.625,37.613z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="646" x2="408.2217" y2="646" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_3_)" cx="19.625" cy="31.425" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="641.0938" x2="409.4502" y2="641.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.613C8.787,23.613,0,21.738,0,19.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.738,30.464,23.613,19.625,23.613z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="635.0938" x2="409.4502" y2="635.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,23.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.738,8.787,23.613,19.625,23.613z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="632" x2="408.2217" y2="632" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_6_)" cx="19.625" cy="17.426" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="627.5938" x2="409.4502" y2="627.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" 
style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_7_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_8_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="621.5938" x2="409.4502" y2="621.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_8_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_9_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="618.5" x2="408.2217" y2="618.5" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_9_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.291,46.792c0,0-4.313,0.578-7.249,0.694 C20.917,47.613,15,47.613,15,47.613l-2.443-10.279l-0.119-2.283l-1.231-1.842L9.789,23.024l-0.082-0.119L9.3,20.715l-1.45-1.44 L5.329,8.793c0,0,5.296,0.882,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.644l-0.375,1.875 l1.627,2.193L31.291,46.792z"/> </svg> <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="41px" height="48px" viewBox="-0.875 -0.887 41 48" enable-background="new -0.875 -0.887 41 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-979.1445" x2="682.0508" y2="-979.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,36.763C8.787,36.763,0,34.888,0,32.575v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,34.888,30.464,36.763,19.625,36.763z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-973.1445" x2="682.0508" y2="-973.1445" 
gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,36.763c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,34.888,8.787,36.763,19.625,36.763z"/> <path fill="#3C89C9" d="M19.625,26.468c10.16,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.554,5.438 c-12.125,0-18.467-2.484-19.541-4.918C-0.127,29.125,9.465,26.468,19.625,26.468z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-965.6948" x2="682.0508" y2="-965.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_3_)" d="M19.625,23.313C8.787,23.313,0,21.438,0,19.125v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.438,30.464,23.313,19.625,23.313z"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-959.6948" x2="682.0508" y2="-959.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.313c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.438,8.787,23.313,19.625,23.313z"/> <path fill="#3C89C9" d="M19.476,13.019c10.161,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.555,5.438 c-12.125,0-18.467-2.485-19.541-4.918C-0.277,15.674,9.316,13.019,19.476,13.019z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-952.4946" x2="682.0508" y2="-952.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop 
offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-946.4946" x2="682.0508" y2="-946.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_6_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="644.0293" y1="-943.4014" x2="680.8223" y2="-943.4014" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <ellipse fill="url(#SVGID_7_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.04,45.982c0,0-4.354,0.664-7.29,0.781 c-3.125,0.125-8.952,0-8.952,0l-2.384-10.292l0.044-2.108l-1.251-1.154L9.789,23.024l-0.082-0.119L9.5,20.529l-1.65-1.254 L5.329,8.793c0,0,4.213,0.903,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.645l-0.521,1.416 l1.46,1.834L31.04,45.982z"/> </svg> nova-13.1.4/doc/source/image_src/create_vm_states.diag0000664000567000056710000000177313064447140024073 0ustar jenkinsjenkins00000000000000seqdiag { edge_length = 250; span_height = 40; node_width=200; default_note_color = lightblue; // Use note (put note on rightside) api [label="Compute.api"]; manager [label="Compute.manager"]; api -> manager [label = "create_db_entry_for_new_instance", note = "VM: Building Task: Scheduling Power: No State"]; manager -> manager [label="_start_building", note ="VM: Building Task: None"]; manager -> manager [label="_allocate_network", note ="VM: Building Task: Networking"]; manager -> manager [label="_prep_block_device", note ="VM: Building Task: Block_Device_Mapping"]; manager -> manager [label="_spawn", note ="VM: Building Task: Spawning"]; api <-- manager [note ="VM: Active Task: None"]; } nova-13.1.4/doc/source/scheduler_evolution.rst0000664000567000056710000001545613064447140022603 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================== Scheduler Evolution =================== Evolving the scheduler has been a priority item over several releases: http://specs.openstack.org/openstack/nova-specs/#priorities The scheduler has become tightly coupled with the rest of nova, limiting its capabilities, accuracy, flexibility and maintainability. The goal of scheduler evolution is to bring about a better separation of concerns between scheduling functionality and the rest of nova. Once this effort has completed, it's conceivable that the nova-scheduler could become a separate git repo, outside of nova but within the compute project. This is not the current focus. Problem Use Cases ================== Many users want to do more advanced things with the scheduler, but the current architecture is not ready to support those use cases in a maintainable way. A few examples will help to illustrate where the scheduler falls short: Cross Project Affinity ----------------------- It can be desirable, when booting from a volume, to use a compute node that is close to the shared storage where that volume is. Similarly, for the sake of performance, it can be desirable to use a compute node that is in a particular location in relation to a pre-created port. Accessing Aggregates in Filters and Weights -------------------------------------------- Any DB access in a filter or weight slows down the scheduler. Until the end of kilo, there was no way to deal with the scheduler accessing information about aggregates without querying the DB in every call to host_passes() in a filter. Filter Scheduler Alternatives ------------------------------ For certain use cases, radically different schedulers may perform much better than the filter scheduler. We should not block this innovation. It is unreasonable to assume a single scheduler will work for all use cases. However, to enable this kind of innovation in a maintainable way, a single strong scheduler interface is required. Project Scale issues --------------------- There are many interesting ideas for new schedulers, like the solver scheduler, and frequent requests to add new filters and weights to the scheduling system. The current nova team does not have the bandwidth to deal with all these requests. A dedicated scheduler team could work on these items independently of the rest of nova. The tight coupling that currently exists makes it impossible to work on the scheduler in isolation. A stable interface is required before the code can be split out. Key areas we are evolving ========================== Here we discuss, at a high level, areas that are being addressed as part of the scheduler evolution work. Fixing the Scheduler DB model ------------------------------ We need the nova and scheduler data models to be independent of each other. The first step is breaking the link between the ComputeNode and Service DB tables. In theory, where the Service information is stored should be pluggable through the service group API, and should be independent of the scheduler service. For example, it could be managed via zookeeper rather than polling the nova DB.
There are also places where filters and weights call into the nova DB to find out information about aggregates. This needs to be sent to the scheduler, rather than read directly from the nova database. Versioning Scheduler Placement Interfaces ------------------------------------------ At the start of kilo, the scheduler is passed a set of dictionaries across a versioned RPC interface. The dictionaries can create problems with the backwards compatibility needed for live-upgrades. Luckily we already have the oslo.versionedobjects infrastructure we can use to model this data in a way that can be versioned across releases. This effort is mostly focused on the request_spec. See, for example, `this spec`_. Sending host and node stats to the scheduler --------------------------------------------- Periodically nova-compute updates the scheduler state stored in the database. We need a good way to model the data that is being sent from the compute nodes into the scheduler, so that over time, the scheduler can move to having its own database. This is linked to the work on the resource tracker. Updating the Scheduler about other data ---------------------------------------- For things like host aggregates, we need the scheduler to cache information about those, and know when there are changes so it can update its cache. Over time, it's possible that we will need to send cinder and neutron data, so the scheduler can use that data to help pick a nova-compute host. Resource Tracker ----------------- The recent work to add support for NUMA and PCI pass through has shown we have no good pattern to extend the resource tracker. Ideally we want to keep the innovation inside the nova tree, but we also need it to be easier. This is closely related to the effort to re-think how we model resources, as covered by discussion about `resource providers`_. Parallelism and Concurrency ---------------------------- The current design of the nova-scheduler is very racy, and can lead to excessive numbers of build retries before the correct host is found. The recent NUMA features are particularly impacted by how the scheduler works. All this has led to many people running only a single nova-scheduler process configured to use a very small greenthread pool. The work on cells v2 will mean that we soon need the scheduler to scale for much larger problems. The current scheduler works best with fewer than 1k nodes but we will need the scheduler to work with at least 10k nodes. Various ideas have been discussed to reduce races when running multiple nova-scheduler processes. One idea is to use two-phase commit "style" resource tracker claims. Another idea involves using incremental updates so it is more efficient to keep the scheduler's state up to date, potentially using Kafka. For more details, see the `backlog spec`_ that describes more of the details around this problem. .. _this spec: http://specs.openstack.org/openstack/nova-specs/specs/kilo/approved/sched-select-destinations-use-request-spec-object.html .. _resource providers: https://blueprints.launchpad.net/nova/+spec/resource-providers .. _backlog spec: http://specs.openstack.org/openstack/nova-specs/specs/backlog/approved/parallel-scheduler.html nova-13.1.4/doc/source/sample_config.rst0000664000567000056710000000072713064447140021322 0ustar jenkinsjenkins00000000000000========================== Nova Configuration Options ========================== The following is a sample Nova configuration for adaptation and use.
It is auto-generated from Nova when this documentation is built, so if you are having issues with an option, please compare your version of Nova with the version of this documentation. The sample configuration can also be viewed in `file form <_static/nova.conf.sample>`_. .. literalinclude:: _static/nova.conf.sample nova-13.1.4/doc/source/policy_enforcement.rst0000664000567000056710000001623213064447152022401 0ustar jenkinsjenkins00000000000000.. Copyright 2014 Intel All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Rest API Policy Enforcement =========================== Here is a vision of how we want policy to be enforced in nova. Problems with current system ---------------------------- There are several problems with the current API policy. * The permission checking is spread through the various levels of the nova code, and some hard-coded permission checks make some policies impossible to enforce. * API policy rules need better granularity. Some extensions use just one rule for all of their APIs, so deployers cannot control access to the individual APIs. * There should be an easier way for deployers to override the default policy settings. Currently all of the API rules (EC2, V2, V2.1) are mixed together in one policy.conf file. These are the kinds of things we need to make easier: 1. An operator wants to enable a specific role to access a service API, which is not possible because there is currently a hard-coded admin check. 2. One policy rule per API action. Having a check in the REST API and a redundant check in the compute API can confuse developers and deployers. 3. An operator can specify different rules for APIs that are in the same extension. 4. An operator can override a default policy rule easily, without mixing their own config and the default config in one policy.conf file. Future of policy enforcement ---------------------------- The guiding rule for all of these improvements is to keep the V2 API backwards compatible, because the V2 API may be deprecated after V2.1 reaches parity with V2; this reduces the risk we take. The improvements are only for the EC2 and V2.1 APIs. There are no users of V2.1 yet, as it isn't ready, and we have to change the EC2 API because, unlike the v2 API, it won't be removed: if we kept backwards compatibility for the EC2 API as well, the old compute API layer checks could never be removed. The EC2 API is also much smaller than the Nova API - about 29 APIs once the volume- and image-related calls are excluded (their policy checks are done by cinder and glance) - so fewer users are affected. Enforcement policy at REST API layer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The policy should only be enforced at the REST API layer. This makes it clear to users where policy will be enforced. If policy checks are spread across multiple layers of the nova code, users who do not know the code cannot tell when and where a policy will be enforced. All permission checking below the REST API layer will be removed; policy will only be enforced at the REST API layer, as sketched below.
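The following is only a simplified sketch of what "enforce once, at the REST API layer" means, written against the generic oslo.policy ``Enforcer`` interface; the controller, rule name and target below are illustrative and are not actual nova code.

.. code-block:: python

    # Simplified sketch, not actual nova code.  The single policy check
    # happens in the REST API controller; the compute layer it calls
    # into performs no further permission checks.
    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)


    class ServerActionController(object):
        def pause(self, req, server_id):
            context = req.environ['nova.context']
            # Illustrative rule name following the V2.1 naming scheme.
            enforcer.enforce('os_compute_api:servers:pause',
                             {'project_id': context.project_id},
                             context.to_dict(),
                             do_raise=True)
            # ... hand the request off to the compute API layer ...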
Enforcing policy only at the REST API layer will affect the EC2 API and the V2.1 API: some APIs currently have policy enforcement only at the Compute/Network API layer, and those policies will be moved up to the REST API layer and renamed. Removes hard-code permission checks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hard-coded permission checks make it impossible to supply a configurable policy. They should be removed in order to make nova auth completely configurable. This will affect the EC2 API and the Nova V2.1 API; users need to update their policy rules to match the old hard-coded permissions. For the Nova V2 API, the hard-coded permission checks will be moved to the REST API layer to guarantee that backwards compatibility is not broken. That leaves some ugly hard-coded permission checks in the API layer, but the V2 API will be removed once the V2.1 API is ready, so this choice reduces the risk. Port policy.d from oslo-incubator into nova ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This feature lets deployers override the default policy rules easily. When the nova default policy config changes, a deployer only needs to replace the default policy config files with the new ones; their own policy config in other files is not affected. Use different prefix in policy rule name for EC2/V2/V2.1 API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently all the APIs (Nova v2/v2.1 API, EC2 API) use the same set of policy rules, and there is no obvious mapping between those policy rules and the EC2 API. Users should be able to see clearly which policy has to be configured for a specific API. Nova should use a different policy rule name prefix per API to group the rules, and put them in different policy configuration files in policy.d: * EC2 API: Use the prefix "ec2_api". A rule looks like "ec2_api:[action]" * Nova V2 API: After we move to V2.1 we don't want to spend time changing the V2 API rules, nor bother deployers with upgrading their policy config, so the V2 API policy rules keep their existing names. * Nova V2.1 API: We name the policy rules "os_compute_api:[extension]:[action]". The core API may be changed in the future, so we don't encode "compute" or "compute_extension" in the name to distinguish the core API from extension APIs. This will affect the EC2 API and the V2.1 API. For the EC2 API, deployers need to update their policy config. For the V2.1 API there are no users yet, so there is no effect. Group the policy rules into different policy files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After grouping the policy rules per API, we can separate them into different files, so deployers can see more clearly which rules they can set for a specific API. The rules can be grouped as below: * policy.conf: It only contains the generic rules, like: :: "context_is_admin": "role:admin", "admin_or_owner": "is_admin:True or project_id:%(project_id)s", "default": "rule:admin_or_owner", * policy.d/00-ec2-api.conf: It contains all the policy rules for the EC2 API. * policy.d/00-v2-api.conf: It contains all the policy rules for the nova V2 API. * policy.d/00-v2.1-api.conf: It contains all the policy rules for the nova v2.1 API. The prefix '00-' is used to order the config files. All the files in policy.d are loaded in alphabetical order; '00-' means those files will be loaded very early. Add separated rule for each API in extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This provides better granularity for policy rules: instead of a single policy rule for a whole extension, there is a separate rule for each API in the extension. Users need to split their existing extension-level rules into per-API rules, as illustrated below.
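For example, where the V2 policy file has a single "compute_extension:admin_actions" rule covering a whole extension, the per-API layout would look something like the following (the exact rule names here are only illustrative):

::

    "os_compute_api:os-admin-actions:reset_state": "rule:admin_api",
    "os_compute_api:os-admin-actions:inject_network_info": "rule:admin_api",
    "os_compute_api:os-admin-actions:reset_network": "rule:admin_api",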
Enable action level rule to override extension level rule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Separating the rules for each API in an extension increases the amount of work for deployers. Enabling an extension-level rule as the default for each API in that extension eases that a lot, and a deployer can still specify a rule for an individual API to override the extension-level rule. Existing Nova APIs being restricted --------------------------------- Nova provides default policy rules for all of the APIs. Operators should only make the policy rules more permissive: if an operator restricts an API, that may break existing users or applications of that API, which is a form of backwards incompatibility. Operators are, however, free to grant additional permissions on an existing API. nova-13.1.4/doc/source/code-review.rst0000664000567000056710000002700713064447152020730 0ustar jenkinsjenkins00000000000000.. _code-review: ========================== Code Review Guide for Nova ========================== This is a very terse set of points for reviewers to consider when looking at nova code. These are things that are important for the continued smooth operation of Nova, but that tend to be carried as "tribal knowledge" instead of being written down. It is an attempt to boil down some of those things into nearly checklist format. Further explanation about why some of these things are important belongs elsewhere and should be linked from here. Upgrade-Related Concerns ======================== RPC API Versions ---------------- * If an RPC method is modified, the following needs to happen: * The manager-side (example: compute/manager) needs a version bump * The manager-side method needs to tolerate older calls as well as newer calls * Arguments can be added as long as they are optional. Arguments cannot be removed or changed in an incompatible way. * The RPC client code (example: compute/rpcapi.py) needs to be able to honor a pin for the older version (see self.client.can_send_version() calls). If we are pinned at 1.5, but the version requirement for a method is 1.7, we need to be able to formulate the call at version 1.5. * Methods can drop compatibility with older versions when we bump a major version. * RPC methods can be deprecated by removing the client (example: compute/rpcapi.py) implementation. However, the manager method must continue to exist until the major version of the API is bumped. Object Versions --------------- * If a tracked attribute (i.e. listed in fields) or remotable method is added, or a method is changed, the object version must be bumped. Changes for methods follow the same rules as above for regular RPC methods. We have tests to try to catch these changes, which remind you to bump the version and then correct the version-hash in the tests. * Field types cannot be changed. If absolutely required, create a new attribute and deprecate the old one. Ideally, support converting the old attribute to the new one with an obj_load_attr() handler. There are some exceptional cases where changing the type can be allowed, but care must be taken to ensure it does not affect the wireline API. * New attributes should be removed from the primitive in obj_make_compatible() if the attribute was added after the target version. * Remotable methods should not return unversioned structures wherever possible. They should return objects or simple values as the return types are not (and cannot) be checked by the hash tests. * Remotable methods should not take complex structures as arguments.
These cannot be verified by the hash tests, and thus are subject to drift. Either construct an object and pass that, or pass all the simple values required to make the call. * Changes to an object as described above will cause a hash to change in TestObjectVersions. This is a reminder to the developer and the reviewer that the version needs to be bumped. There are times when we need to make a change to an object without bumping its version, but those cases are only where the hash logic detects a change that is not actually a compatibility issue and must be handled carefully. Database Schema --------------- * Changes to the database schema must generally be additive-only. This means you can add columns, but you can't drop or alter a column. We have some hacky tests to try to catch these things, but they are fragile. Extreme reviewer attention to non-online alterations to the DB schema will help us avoid disaster. * Dropping things from the schema is a thing we need to be extremely careful about, making sure that the column has not been used (even present in one of our models) for at least a release. * Data migrations must not be present in schema migrations. If data needs to be converted to another format, or moved from one place to another, then that must be done while the database server remains online. Generally, this can and should be hidden within the object layer so that an object can load from either the old or new location, and save to the new one. REST API ========= When making a change to the nova API, we should always follow `the API WG guidelines `_ rather than going for "local" consistency. Developers and reviewers should read all of the guidelines, but they are very long. So here are some key points: * `Terms `_ * ``project`` should be used in the REST API instead of ``tenant``. * ``server`` should be used in the REST API instead of ``instance``. * ``compute`` should be used in the REST API instead of ``nova``. * `Naming Conventions `_ * URL should not include underscores; use hyphens ('-') instead. * The field names contained in a request/response body should use snake_case style, not CamelCase or Mixed_Case style. * `HTTP Response Codes `_ * Synchronous resource creation: ``201 Created`` * Asynchronous resource creation: ``202 Accepted`` * Synchronous resource deletion: ``204 No Content`` * For all other successful operations: ``200 OK`` Config Options ============== Location -------- The central place where all config options should reside is the ``/nova/conf/`` package. Options that are in named sections of ``nova.conf``, such as ``[serial_console]``, should be in their own module. Options that are in the ``[DEFAULT]`` section should be placed in modules that represent a natural grouping. For example, all of the options that affect the scheduler would be in the ``scheduler.py`` file, and all the networking options would be moved to ``network.py``. Implementation -------------- A config option should be checked for: * A short description which explains what it does. If it is a unit (e.g. timeouts or so) describe the unit which is used (seconds, megabyte, mebibyte, ...). * A long description which shows the impact and scope. The operators should know the expected change in the behavior of Nova if they tweak this. * Hints which services will consume this config option. Operators/Deployers should not be forced to read the code to know which one of the services will change its behavior nor should they set this in every ``nova.conf`` file to be sure. 
* Descriptions/Validations for the possible values. * If this is an option with numeric values (int, float), describe the edge cases (like the min value, max value, 0, -1). * If this is a DictOpt, describe the allowed keys. * If this is a StrOpt, list any possible regex validations, or provide a list of acceptable and/or prohibited values. * Interdependencies to other options. If other config options have to be considered when this config option gets changed, is this described? Third Party Tests ================= Any change that is not tested well by the Jenkins check jobs must have a recent +1 vote from an appropriate third party test (or tests) on the latest patchset, before a core reviewer is allowed to make a +2 vote. Virt drivers ------------ At a minimum, we must ensure that any technology specific code has a +1 from the relevant third party test, on the latest patchset, before a +2 vote can be applied. Specifically, changes to nova/virt/driver/ need a +1 vote from the respective third party CI. For example, if you change something in the XenAPI virt driver, you must wait for a +1 from the XenServer CI on the latest patchset, before you can give that patch set a +2 vote. This is important to ensure: * We keep those drivers stable * We don't break that third party CI Notes ----- Please note: * Long term, we should ensure that any patch a third party CI is allowed to vote on, can be blocked from merging by that third party CI. But we need a lot more work to make something like that feasible, hence the proposed compromise. * While its possible to break a virt driver CI system by changing code that is outside the virt drivers, this policy is not focusing on fixing that. A third party test failure should always be investigated, but the failure of a third party test to report in a timely manner should not block others. * We are only talking about the testing of in-tree code. Please note the only public API is our REST API, see: :doc:`policies` Microversion API ================ * If an new microversion API is added, the following needs to happen: * A new patch for the microversion API change in python-novaclient side should be submitted. Release Notes ============= What is reno ? -------------- Nova uses `reno `_ for providing release notes in-tree. That means that a patch can include a *reno file* or a series can have a follow-on change containing that file explaining what the impact is. A *reno file* is a YAML file written in the releasenotes/notes tree which is generated using the reno tool this way: .. code-block:: bash $ tox -e venv -- reno new where usually ```` can be ``bp-`` for a blueprint or ``bug-XXXXXX`` for a bugfix. Refer to the `reno documentation `_ for the full list of sections. When a release note is needed ----------------------------- A release note is required anytime a reno section is needed. Below are some examples for each section. Any sections that would be blank should be left out of the note file entirely. 
If no section is needed, then you know you don't need to provide a release note :-) * ``upgrade`` * The patch has an `UpgradeImpact `_ tag * A DB change needs some deployer modification (like a migration) * A configuration option change (deprecation, removal or modified default) * some specific changes that have a `DocImpact `_ tag but require further action from an deployer perspective * any patch that requires an action from the deployer in general * ``security`` * If the patch fixes a known vulnerability * ``features`` * If the patch has an `APIImpact `_ tag * For nova-manage and python-novaclient changes, if it adds or changes a new command, including adding new options to existing commands * not all blueprints in general, just the ones impacting a `contractual API `_ * a new virt driver is provided or an existing driver impacts the `HypervisorSupportMatrix `_ * ``critical`` * Bugfixes categorized as Critical in Launchpad *impacting users* * ``fixes`` * No clear definition of such bugfixes. Hairy long-standing bugs with high importance that have been fixed are good candidates though. Three sections are left intentionally unexplained (``prelude``, ``issues`` and ``other``). Those are targeted to be filled in close to the release time for providing details about the soon-ish release. Don't use them unless you know exactly what you are doing. Notifications ============= * Every new notification type shall use the new versioned notification infrastructure documented in :doc:`notifications` nova-13.1.4/doc/source/rpc.rst0000664000567000056710000003177213064447140017304 0ustar jenkinsjenkins00000000000000. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and Nova ============= AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two Nova components and allows them to communicate in a loosely coupled fashion. More precisely, Nova components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (such as the client does not need to know where the servant's reference is). * Full a-synchronism between client and servant (such as the client does not need the servant to run at the same time of the remote call). * Random balancing of remote calls (such as if more servants are up and running, one-way calls are transparently dispatched to the first available servant). Nova uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below: .. image:: ./images/rpc/arch.png :width: 60% .. Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. 
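In current releases that adapter layer is provided by the oslo.messaging library. The sketch below shows what the two primitives look like from the caller's side; the topic, server and method names are illustrative and this is not actual nova code.

.. code-block:: python

    # Minimal sketch using oslo.messaging; the topic, server and method
    # names are illustrative, not real nova RPC methods.
    from oslo_config import cfg
    import oslo_messaging

    transport = oslo_messaging.get_transport(cfg.CONF)
    target = oslo_messaging.Target(topic='compute', server='myhost')
    client = oslo_messaging.RPCClient(transport, target)

    context = {}  # a real caller passes the request context here

    # rpc.call: request/response, blocks until the remote side returns.
    diagnostics = client.call(context, 'get_diagnostics', instance_id='abc')

    # rpc.cast: one-way, returns immediately without waiting for a result.
    client.cast(context, 'reboot_instance', instance_id='abc')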
Each Nova service (for example Compute, Scheduler, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise it acts as a publisher only. Nova RPC Mappings ----------------- The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. Every Nova component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute or Network). Invokers and Workers do not actually exist in the Nova object model, but we are going to use them as an abstraction for sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: 1) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and reply accordingly to rpc.call operations. Figure 2 shows the following internal elements: * Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery. * Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations). * Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). * Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message. * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in Nova. 
* Direct Exchange: this is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. .. image:: ./images/rpc/rabt.png :width: 60% .. RPC Calls --------- The diagram below shows the message flow during an rpc.call operation: 1. a Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. 3. once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. .. image:: ./images/rpc/flow1.png :width: 60% .. RPC Casts --------- The diagram below shows the message flow during an rpc.cast operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: ./images/rpc/flow2.png :width: 60% .. AMQP Broker Load ---------------- At any given time the load of a message broker node running either Qpid or RabbitMQ is function of the following parameters: * Throughput of API calls: the number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. * Number of Workers: there is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after Nova components' bootstrap in a test environment. Exchanges and queues being created by Nova components are: * Exchanges 1. nova (topic exchange) * Queues 1. compute.phantom (phantom is hostname) 2. compute 3. network.phantom (phantom is hostname) 4. network 5. scheduler.phantom (phantom is hostname) 6. scheduler .. image:: ./images/rpc/state.png :width: 60% .. RabbitMQ Gotchas ---------------- Nova uses Kombu to connect to the RabbitMQ environment. Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): * Hostname: The hostname to the AMQP server. * Userid: A valid username used to authenticate to the server. 
* Password: The password used to authenticate to the server. * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". * Port: The port of the AMQP server. Default is 5672 (amqp). The following parameters are default: * Insist: insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. * Connect_timeout: the timeout in seconds before the client gives up connecting to the server. The default is no timeout. * SSL: use SSL to connect to the server. The default is False. More precisely, Consumers need the following parameters: * Connection: the above mentioned Connection object. * Queue: name of the queue. * Exchange: name of the exchange the queue binds to. * Routing_key: the interpretation of the routing key depends on the value of the exchange_type attribute. * Direct exchange: if the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. * Fanout exchange: messages are forwarded to the queues bound to the exchange, even if the binding does not have a key. * Topic exchange: if the routing key property of the message matches the routing key of the queue according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available: star ("*") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example "*.stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". * Durable: this flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. * Auto_delete: if set, the exchange is deleted when all queues have finished using it. Default is False. * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. * Exchange_type: AMQP defines several default exchange types (routing algorithms) that cover most of the common messaging use cases. * Auto_ack: acknowledgment is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. * No_ack: it disables acknowledgment on the server-side. This is different from auto_ack in that acknowledgment is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. * Auto_declare: if this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. Publishers specify most of the parameters of Consumers (though they do not specify a queue name), but they can also specify the following: * Delivery_mode: the default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: * 1 or "transient": the message is transient.
This means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent": the message is persistent. This means the message is stored both in memory and on disk, and is therefore preserved if the server dies or restarts. The default value is 2 (persistent). During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. nova-13.1.4/doc/source/images/0000775000567000056710000000000013064447471017230 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/images/rpc/0000775000567000056710000000000013064447471020014 5ustar jenkinsjenkins00000000000000nova-13.1.4/doc/source/images/rpc/flow1.png0000664000567000056710000012002613064447140021544 0ustar jenkinsjenkins00000000000000
°Xl½yö‹¨`›^“X+¢}b¯ø?­ÏÙ¬Ô+Kk/_ƒ¶'ÞQs6b{YbЉ2n½ö²:ø_f5¸ÊGƽ^BlN …|f9~õÌìã^WÀë“_ÿºZ°Ë.LÒ× ××0‚H€%ÀÆÚáÎztžê·ó.%pÝ}÷ÝÕÃ?¬Þ|óÍRZ°`4hP)OÏ­¶VS¦Îˆµ^YéÌ‹PO,–Aožý"jØVg·ö¼Ÿ_ìy]_{냊ýÔ ËWyp‹d?ËŸ^¼¬ô¶›ÿK^üfoC™HfyvùR–ì/y¥>öþ²½\]¥|”çWÏ(û©,¬ß-ç7‰SöoÍì¢ï}O­X±‚)Càš`c»™Myÿ–­x] ùþJPÚµkWuÓM7©·Þz«lº÷Þ{UïÞ½Kûì³ß ãe£ìYV´ãW£Ð“K€%ÀÆÖ§e©àÌ,<­®ôüƒGVB‹‘›!ÆøM@Üð¿”yÚ°á¥ýÍ}ÅókÛ2ñ-õ“ãâûò+Æ·*ïÖ;§¶h»®&c›.Ås¿\Ø,ݲu¨+6»ÐN€õ†!,Õ°ç:êÂK°àê¼óÎS¯½öšzûí·Òå—_®:tèàuè/{¦7Ëñ±éϨŒ,–k¿–•Â3°:6íg!<œ6° ¬J^Qñ† `ÊvìoB¦½_%€E߉òGê‚ãɱ£f¸³®”‰ºšÛnƒ<ÎQô YX4b3Ô7+7_fêI€%À¶4V†3„82hŸ|û½ª[·î%ê!CÔ’%KÔ;ï¼:-_¾\x≥²:uÞDwmdu¢£eÑ@5–K€ÍŒùgEs°6¬š )žMñÀšá»ETšv¸¯‹Ö,åÙÀmÖ!ÁØ.cye<¯Y†lO¢ÏË:ÀÆyƒ°l­–K€õ}ªðÀb,‡ÓÒñ¨vÚIýö·¿Uï¾ûnÍéÉ'ŸTûì³O©ìÞ}¶WW›DGÊcD¨A:` °Xš¦Z\¬ß„Nf¨oÀ,¿sÈ`.ÍpåjÖ>– °VôÝR®|ãØ¦7Øooг½šíØü> †èS]ëé` °-m¨¯þnªµ=åh‚Q€Å8W,#àÚ©S'uà 7¨Õ«WGž¦L™¢½»Ý¾ôîêñµ8~5$÷IL]u&À` °9êE«?•Ì,à®ÒŒÃv®¬ì¬9æccícÛc`mlÀÚùíg·Y¾ës½Ú|,l[ظü¤Df]„}Öô!À`kj@ùÝ™@ä°XîËÞ ³Á8Õ#F¨7ÞxC½÷ž^Ë5¦„òGÕj|숑£ÆÝVÛYr¿úÃ,–K€Ío§âÌ2°·Eߘ4û€+<§²ß寕¬:ŒãˆGÔOÆÄP¨‹9‰S€•c3k‚sÆ=°!šÞËߨKØ„*À!ó:Ö† û‘µ^ÃîWm~{Yžj˱÷#ÀfhÆà8fxŽx§¨Úe^Ê!,:d,sƒé;ßùŽ·žëŸþô§ÄÒ‹/¾¨Ž=öXc|lg…uf £õ‡Ñj®–K€ÍKZÓyd`ñœ“Y†e‚#|›a·2k¯„äÚ^Ì €µg– dR&Ù.ÞàZÖž1uËAq¨ÆˆµVÑåhpFù‘µaÖ èEýâúøA5Ž ¢ü` °Q.£eÛÌCY„ €ýêW¿êã6Ûl£¦OŸ®Þÿýº¥'žxBõëׯ²‡‹ñ¸Õ@÷©ü` °Øø ®éÆoT;w.,Æår|lý€4ìË,–_§–¡’ûëºN¢¾MMMS'L¼‘ý¹ã °Ïì8ó×`j6ùˆ§¿áo®|ð7~CB¨­ùÁ6¤M<ø[>ðèÚKì˜å™yÍr2e»N’õ‘º™uÆvìk×ypî€wÙ&e¡ž€ø(?ͺ0Ž-0Ä2„8ÊÛi­²Øá9,Öh}ä‘GÔ1Ç£zõê¥î¿ÿ~õá‡Ö5¤Ï9çcýÙ&o=Ú8;Z– $` °ØXûµ,ÞEe °Ñ<›ëÑÇé럸Öö¶\uHØ6¸¥QQ9@€ÛLÈÃo²¯@1Ê0µ½ÊÈ‹ñ·å€QöÃväC2ËEõä7”)PjzP‘y¤Ž¨Î eâœðmB|^ßf} ,Va"®(ô,£•G€?¾zúé§Õ´iÓÔn»í¦<ð@õì³Ïª?ÿùÏuMX?öûßÿ~É‹ñºS¦ÎàuMñ›x,–Ëž8J°Ø0í pg{FýBˆí|€@s쬩9¦Û$m(Ä6Ó‹[É £Æ4úÍúáw”eåJXt%-· ¡Î8f5!Ïå´Ï=À>õÔSžGÇ/ÁëÅÄH\pBŠ¢¬¤Ë 6Ìc)t^‚NH€]¸p¡Z´h‘š0a‚êÑ£‡6l˜ZµjU]!=sæLÕ§OŸÈî³ßjþ¢¥¼¾)Y,–º¯â À`ÃÜ 6üa_`Ëž™O¼²æ±MϪYF§_Þ ã¢æyáoxXMFžJõðÓ%Œ®vÞÜìñǯvÚi'/aœ’üØa‡E(gÿý÷¤¬Œl_ÝÀšji9Û—€S%ÀÂûŠÙˆ±œÎf›m¦ÆŽ«>ú裺§qãÆµ{Ò©ÃÔÊUz©Ÿ‚\QëD€%À`sÖ“Öùt°ØÛ6nàÇô¢Jæ¨Ö„Úz,¼»fì*L€­!Ü÷þç)k›om«¶Ó çRöºÜ]vÝÍ)o7=¤Ç¥Lä Sç0õÅzÏY»_ °±öÛkN€%À†X?Ï)àði{+ò;þ6'N’rÌYŒM(µAÿ›å:åcz…ýV–ø‘ü2i“ÔÍ †V~—±°æMcÖA~G½ñ{”ŸÂ,pŠPEñŽ<ñ&€@Åÿ¦÷+chM€Å>È+p›6«]l”·6¬×M+¦°ŒŽÌB,“8Á»ù‹_üBulß^Ý}÷ÝžVöøƒZºt©:ûì³=½öÚkÕ_ÿú׺§_þò—Þ¹àœÕˆ‘£Ô«×dÙ¢lØ{ù³k”`cí· ° ?³ã|¡?ލµ8ó’žßqýBhñ›ßï&‡™½7LÞ m*•Ui›}>€W{fæ cmw¾ • Êrq¹°b@(<+~Vü&“CÙÛ± »i†XlÐ-RÓvÂKØ{î¹Gí¢=š?\}µ]Û¶ê׿þõZ‹åmV®\©,X Ž<òHµë®»ª¹s窿ýíouM¯¾úª:á„JÞØN;«YÎc;HÐ "ÀfÌâ4dóT6Ö©_î¡svʉØì>Sê°hNðXÖ l~žÜ(ÚsÒeHHrÔÇ%À–™Ø‰[USã$N­e#¸„ØÙ³g«æþPõÔàª_Š)Mj¯ví*ì+¯¼¢xq´Ã;¨Ã?\ýñTŸ|òI]ÀÉ óDXgžŒç´Ÿ 6»ÆfÚÛVZêG€u²Oê\º+©ýC€Íî3¥^‹a3„·šV®µŒjŽõ>G¶ÇGqŒþºSj-(ËXLÀd{LýBˆíÙ‹ñ¿LÜdzq±¶,n˜¨Ö˜Û{Kl­­¿âþ—{ê‰'ªÚ´ñÀU’+ÀZ1;0ÆÇvéÒÅ[—ë·þýïO<ýãÿPÿùϼI§ð,Àø½´¾E¨6»ÆfÚgçH€uê· ° F¾DÑ®ã(£^ëÔBÓœI[/›ü§¡a¼þî•æzÖZ·,,‹ñ¬æ$Næ¬Â2i“ä‘IÌ1²&àb(ü–I¡âÑjË'ÀÖÚú“ØO†PÍœ;Bç"c`1Óh=æuu ûÆoxÙ³Î:KõèÑCM:U}ú駉¤Ï>ûLýûßÿVÿüç?ÕÓO?­.½ôRl (le€=òèfõÚ[äîY‡ñ›Ö2 °Nýv¤{†îƒ§é>˜)[`3i륇x4È.ù¯övj«,.µ»e`á•‘‰—Làƒ·Ð)3 ÛË與՜Ø^FÇ.ÿKy8nµ€÷~ØXo«ÈŒÄwëîy%ñý‰ž胔Ír‹P«1öL€ôÔày¢÷*ÏÍqÚ#Û¥©I]rÉ%­&q’1°B,X,f&~ûí·½‰ž ä…òbR(f ×Ï?ÿÜóº¾öÚk^ôŃ>H€­¼¢ `+ìæ[l©}bAU÷k5÷xœû|çÁ )Îc¤±l¬S¿À¶k×î˜6Úh Sö4@ø·Sk ÎÔ¬³Ô¼ìhðaR’ÃX3,NÃìt ³CSRÍš«‘€­+­[m™iÙ[só¯T@d†•¬ùùW¿]ÔÇoTª£2̯½ûl§ªX`1 ñ6›m¦–´xaá]¥Ó5ÔvÝpCuÙe—)ÌBì°ï¾û®Z½zµš>}ºêÝ»·:ùä“Õ›o¾©œQ%€ëÿû_õᇪyóæ©Çœ['p•öL€M'ÀÆÎð†ÝzçÔÈž³Q=ã.‡ëÔoG°NGc¦¼+@€µ@v†Ù4Ìb,jf?Ø©õ°26ÖÛ.2ÃÊ`åYòß6ê³ïÿP­¹ýÞÈŽÖƒ! 
p»ŸÀ^sÍ5jÿ 6ð¼°ûë vÖ9Ù ;nÜ8oâJXØ?ýéOêƒ>PW]u•êÙ³§3fŒ·n,B}«Mâq…Gž^x °é{I€ °¶7ö…å«<­ý»Ÿ×ÖÕ“‹|؉“nY«ÜrÇóäéÅËJÏ”aþí…ÆþæsGʳó†}6¥9?Ö©ß&À:ÉÄLŽ `M€µ`v¹þÿ\xmÅLM¶¼,Ʊ¦9 8R+m'ÀÆz…ºrFS%€mõéÔYýýÔaêÃEK#;¶«!wɘ+ÕIúØ®ù‘Ï`-Z¤öÕcȧ·ÌB|æ™gª-µ÷õá¯,<²»hìÿþïÿ†XxIW­Z¥†®úöíë…ùþë_ÿ •\1ÖáÊX?áÉØtÀ+Cˆƒ¯ƒé ÅxXü/÷,ÿïÚ÷RÂ6@!~Ç‹*É+¿ (– çÅvì‹{ßH²}<\9êqùãKuÂvÔY¶Ÿ÷ó‹½zÊÿ§ Þ*/þó<ÊJ^¬S¿M€u’‰™°˜üH§ÑYMÚ»zM9`­ô»ÞoŽÞÞ¬S££°uÍ–w€­Ó¾o¶%”/‡}–ŒÖÆ«F‘þÛ¡C«Ùy]ž+Ÿ÷ÜZ}<îZõþŠ×3â?ò5A‡5»‚åöŽ;îPýô9ï¬=°XvþüùjÀöÛ«1úœ;àö½ök,3#=óÌ3jß}÷õÆÈ¾ôÒKÞÖ „1®þ裼0æeË–`ë.ì×ÎèuóÀþL˜ 5½ª€R@ümÂ#ÀÒ„Gl3·Ûׯ!–ãbM 5å›û ñÌ6¿a› Ùy /&À:™¼X'™˜ÉQw€ÕO¢.FZžóhýL§É:Äx£ÀuÉF€Ívñ¹ºÓߪ¡á7ºñhæª)-Ñû?PcµÖ¡ÕþÛ64\õφ†Ïóüœp9·5ÌÞ1ø{êܳGªªãJÃuù]õDS·ÜvÄ–Xx5þÞ÷<ƒû /x“8 Ðk½^?Ó©›3Æz«˜uØo';„k§{î¹Ç +†WÛàYµÀŒ›…×Мv€…— Ë|yáúÒ"î|Ø`€O¥ «¤>ŒoäHo©íu½ž6À @Ûá¿f>ùÌ<Ø.¿™k{c]ë•Å|X'S—ë$39* `‡¸fyÏ£v¡Øáð"9Šœx6lvö©ÁƒÕI9Ø! “òþŒp9¿?m¼±zBƒW÷“ØàP €ÝRÃò£ÇÔ °³gÏVõ²:&ÀÞwß}j;=&猿M¯ª„‹÷ÔZéºù¬éÁ•}ƒÖ„Zlâæ^–H€ÍÒÕJ]ClcKm¢aQ†ÖB<½åÜã^rGÛ÷ ãkm7ØìlÄ!ÄxÉÒTk{ŠzD0Dy_»–¥]Á‘„#¹šbÌRüW=.5©%wÞX½Ftð¡jÊÔÎÐTÉûÜsÏ©o¼Q=ùä“%,Â}: /ìr¶ìÔÉ `ßÿ}% ³7®»®:lþ0ÅLÅ»dÉ/\ø“O>ñàõ½÷ÞK%ÀBóI7ݦöÙ÷€´ ¼`ƒÇƒÖÊYÝW h.AãâÁÌb;|ØE‹¨åàø8&6êÞ2Wå`su9ë~2°mSëHŒ\xM]¼#t11B…‡êüIŽ{u£PA¬,Öp}ê©§ÊÎŒu1qSØñ«W\qEè}Â#êülä÷PÆ t¹ Ðu§h€ÄÒ:IŽ{EÝ«×J“8!„‹I’“B èüÍo~£öÒžY<[wÑßðÊ– !~ýõ×Õ]wÝ¥0±$,àîäcŽiõ»l¿ôÒKÕüãVÚ´ì|=IױǨ:èqÂ&°ÚÓKˆ-7U¼¨Ž+“-Ù3 ›^Z„¦Ë$OæØUãM+9Å~ævÏjŽYµ!Z¼ÁR9¾x~Íü²Í¬?ÇÀf¼­­úØÚôãÞEV Àj`]Õ²„N=Ç·f`1#0Öf•6wÒ3œbßr0h®û ˜E~|W‚Çûï¿_uïÞ[ä~ís`á™ýTuü‹öÂýI{ã‚@8ŽíÕÂkµ‹ñ§Ûvéâ­;Y§ÃôDLåö{ßþ¶:PCnŸnÝÔ¬Y³2–´¨§q€õ輪Î÷.¼ˆ˜eKÑ,\¸PaJXìĉÕ`  ˜Ì©ƒ »|ùro+&YÂXÕ·ß~[]rÞyê§-ãe»ðÖ=è uà 7¨ú˜Wõ«ê¤£V«W¯n•°-ÖÛzÀ"\xˆ^ó7Èó*[D€Å8`´ÉzÞi>¶ía…çÕô²â„èÂÃê·–j¥år‚Ö^(Û/äx~/d¼+<®€U¿u`íßð?òº®Q›ækU®nX§Žžë$3Q2›òI˜2 °¦Ù0ÞÏ0ë²K€å#Àï•qôO=Æca?œ·(W†yµ `/ìõÒ:ß7vêÔ©¤ÚC8nÑÐÚY¯!»·Þð /ìÒ¥Kf,–4~üx`m°­7ÀÚm cFlNÚd{f‹BÕ=ÇrüïíµZãÔÉoâ8—•² °NöE:H‡*@ò¨@.ðà a¾H€Y3ä° /ªÀ([ü†¼HæøU3„û˜åIH±ì' -+0-^b€‘Ç–}M¯1¶¡>øÍo_W›c`c½­s›qwµìù眣~ºÞzj¡¾÷wÚb‹À‹»žuyþ]¡Æò?¾á‘Å6xaO<ê(õÎ;ï”öhí™5ÃßiXóz<¦_l ìÖ/&Àr l”÷­½”N”eû•o¯½ŒNÜÇÌBùØXûmN¨@ÈÀ"Å+0‹o9H"›È#Ð)aÂ~+å!dIÊ–ñ·r\lC9´&› ŒÐdÔCê ùñþsTËå'ÀÆz—`õä.• ÂZãW»j/ëj,¥×„}üñÇÕË/¿¬ú}ó›%@¤þ¸©I~ðÁª›!¾P‡›`ë­'«½°˜( aÇHXJ `µSš—Ñ­—èÙ§±œ –› cõSl¬ý6 §T  d`í1°Š®k™5á²Àâ¸&À ú…›¡Ê¨ŸæŒ: 9†6h¬mTà*å`c½K °1ìÊ•+Õ¨‘#=/ì/´'õ´NPCöÞ[MÑKäˆçð:~ÌDÿûß«³N;ÍÙ[ô>’ÇóÂyd V`ß|óMe§,,a  P/ꕵ6@€µßfáT€ d@Ì,`PBÍYˆ]Öž…Ø„ÍJk†›e,òÂÛ$áÃò-¡ÍõC1ÀÔçØ#í>©*`cØçŸ^uÒcZWé{«Ñ3µ'ք׫/»l-½çž{Ô€–ex×óÂêýP€uܸqê(VŒ1µv"Àv²;¬oþÚ,6©.œÇ¡%`ÛÂÆå'% d`Ë-ƒ&à0(„ØÞûÈo•Ö ôVÐJPl{Rs°˜0mŠŸ/ Àְ矾zòÉ'[­‹Yˆ1‰<°¯¼òŠ:þðý±¬z=êµà³ÛÉX@ì¹ë¬£~¢½°È{ÕUWy XõK8&ê€f ÀŒÉ˜AyÞ¼y^3^r=øàƒ ëÉ¢ ì¹×¶ƒ€v@ÈÊdñšÆwM °41¨@â DÂK‰×:ÇŒä‚hÃqà\m¨Á›uˆ«_y&l–Ûnzeò+¡ºÈ¿Í0`^wê°öÒ9AXÔõ†ÁëWgl.ï2‚K•‹µX·èØQuÐ`úë_ÿº"ÀbÒ¦ÎFH0†Çixôó â·»ï¾»äÅøÙ“ôŒÄ½»vU=ô·ìk¯½¦üRšËÇ`©ÃõÚÀXj‡c`ヵ­W ÀæÒ^àI¥[Hx)ݧ˜­ÚErAÒ°2»°Ì> `µV¼§«æ,ÅAc`1vÕÞ/`œÅ++kØÚ³'ñÀBmÙsë’g–K‹œXV:ÚÖÉD&À:ÉÄLŽ `…ÊT6,–K€­Æ¸Õªµ !ÀÞ~ûí%/ì:„¸“ž˜éú–u^áa}hÝu¿\ïUƒêØË.S/¿ü²—f̘áÁ+ 0+¿‡ùþå/éy`¨A)­ëÀ\/s¥Z¹ê½Â½D¡6UÍó€û¸];¬“‰L€u’‰™ À: •©lX,–[ñé°‹-Rû¶Œ…Eø0€rèÁ«õß–‡§h=tÏ=×>°o_o&cdõXÙ°cÇ1òGáÍP”Ò °Õ\‹¼ìC€uƒ ¼\ï"žÖÉD&À:ÉÄLŽ `…ÊT6,–K€­Æ,°wÜq‡ç…5g!¾óÎ;Õßü¦ú‘þÝ›m¸sgõì³Ï®¨'Nô&rÒ²Õ<°j€œº$ÔãwçÍ›§üqo‰¯|Ð[Îç·ç^ ç­¦-Dµ–U[Jk9X'™ë$39*@€u*SÙ°X,¶c¯À ÷í×O5~å+ê׿þu«Yˆ¯ºê*Õuà խ·Þª–/_î›0‰ ·›†Ý§žzªl>¿ýñ‹_xØçŸÞ)`ÓLØt]jž ܧò5$À:™ÈX'™˜ÉQ¬£P™ÊF€%À` °Õ_Õc[±;ºï¾ûÆÀ"„P8eÊo› °AËÛ`ûu×]§Õ^X,«ÓüÃV\Ç.o̘1jèСjñâÅN‰›.`"À¦ëzTó\à>ØŒ`l"²ˆ’Ø<6,–K€­Æèœ0ñFµžž¤  ºÎ:먣Ž:J=öØcÀ>÷ÜsjÐþû«»ï¾»•6hyÙÞI{_±Ô&zæ™gJKâ<úè£êà}÷-»DŽ,êàšBœh"À¦çZTóLà>Á×X'K:2€m×®Ý/ÑG1eOƒõ×ßà§Öœ‰¬Qâ9šj="–K€%ÀVkxb¦Üc?©d´×Ëâœ{î¹Àb ê’%KZ,Ö‚uI#Ï>[©á Ë>“'OöŽ5aÂßr.ӳà èuMØ`£»Úöv?lz®EØkÇün׎ëdµF°MMMSñ²•íÓ­}¦I§–—N & 6 #,c¨.ë†ZË#À`[ÚÐ&ú»±Öö”£ýÙá¬kvtó-U»í±W d{ôèás56hmVs;¸k‡Þš±=›š&Âö[n¹EmÕØèmC»LX„3»&lz lz®Eš Ù<Õ…ëd%`Cô¿yº?Ìs‰`aÛÂÆå'% DòFAöém¶QÏ Æ”! 
pÍæj×0%m2OÕ ÀVÑN™:Cmö.%ÝW‡ûbvß 5Yý¶Ÿzâ‰ê"šŒPâ=õÄO#Ï}úx³c|¬kBþ¾ùMuŒÙ5^°H[³¬‹/¾ØóÀÎ;7t"À`ëuáeO‡MjÀÞ{¦­¶ÚZ­¯£\ò"φn¤ºèµ•]òo¼ñÆ ÷§KÞ6mÚ8åCYaꦾôøø¬A,Ö©ãî¥sÕ<Ç ŽÄIœêÿ\¯ö¹J€uºW2™‰›ÉËÆJgDlÄo€'ß~¯g¤·tJê»ßý®úÝï~çÍVìšÎ<í4u¨†V؇5Äî¥ nÙôèÑÀ>ñÄ¡¶þ†NQ=°‹—®PÝ»÷P.D1ôë×Ï%«—go ]t‘S~€îœ9sœòâ>vý„©s˜úŽá­­Ö@®Ç~Ød-lýŸëÕÞgØdï•$F€MRm«h dÊ(ª¶ƒHz?Œ~öHo\,:§F=£ðgœ¡žþy§tã7zc``ñ  7nœ·¿,`´šôÈ#¨|Pa2(ÔoϽ°Dü"£R›#À#a$À¦Ïx'À&kJ`Ów¸ÚØdï•$F€MRm«h \b„L:øÐ’7vÓM7U×^{­·4N¥ô«_ýªÀ.×¹³§qúéÞ~ð4ýèG?ò<»Õ$X¬eK€MÞð!À`é-ZWïù`“Ž»jP>l¼÷F=K'ÀÖSýü{>%Œ;áç °1¬t\³§zmó­È"4rêÔ©jñâžiÒ¤IÀbì9z)-:vTøMò ÀΞ=[U“î»ï>€1¾màØãOd;H H{ À` °ì‚£T€K€m±maãò“°)¹9©Æd}hSü`‡¶±ã®Uíõ,Å€F$@$B€±ôŽ™n¸áÕMÏB¼¥Î{ÆO~²Öö /¼ÐÛžÔ° ¡Ì˜ÄFêpÒ©g¨•«ÞK\‹ ·ÒyÞN€%À`ÙG©–ÛbÛÂÆå'% `Sr!rR lë IpIÐó(,žvÆðÒøØvÚÓ:räHµhÑ¢Rz衇Ô=TáÛü]þ¾à‚ <€Åv×tõÕW«-·Ü²®ûìw€Â’&yÅ´ž–K€Í‰E‘’Ó À` °)¹j`ÓwM²\#,6ІY‘â íÖ­›ºþúëÕ3Ï<˜°?üáÕ¬Y³ÓwÜ¡X:NO½<É”©3R¡AZ3îz` °Ø,›é«{šö¼Ÿ_¬Ê¥×Þú æ¾èéÅËÔ®ýwWøŽûÙGù-6@м…Š–Á ¡˜,ªK€MU'‡å1¾Ñ¥k 0û÷ïïͼpá²éç?ÿ¹°¿ýío˦ûï¿_qÄ¥q®XÚç’1W¦êÜã0²P&–K€¥5¥YØÍ·ØR6lx h£X”ñC«–¯ÊdÿF€òNHWYØt]¬×†K€Me'¸\OOØ„ÎluÖQÍÍÍjîܹjÁ‚k¥Q£Fy;sæLßtþùç·çzìq'*̈œ¸+B °XlÖM‰Hê %oˆ¢¤4¬ùLÀ>úÄöEÆÐ%lw@:Ë À¦óºdµVXlj;OŒ=öø“JÞXŒ½øâ‹ÕÓO?Ý* À>ðÀÊLX;sûí·/í¿ÇžÔcó¥ö|‹«~çH€%À`³jBDZﺴ9Q”˜u€…Wp+ !ÇòìðÂÃ:qÒ-¥íøßc3„ûKYi/&ÀFq¤³ ¼¡šÎª±VT€K€M=Ðar¥]wÛ£¢˜| kÃΟ?ßKð°Â;mÚ4/ÝrË-jРAÆxÚîjòí÷¦þ< °Ùx¤šk‡qßÝ»÷¦W/c°Ü”ë' véÒEÍ™3Ç©h—®Ÿ0uSß{ï­0Ô ÍëµOŸ¾;èUÀhü”W€«=‘›â™E[|XÑ~ñ;îC€(„‘ð÷‘G7—î Ó³{ëS½ýåžÁÿRV½î‡JÇ%ÀòA¨€‹Xlf AL¶Ô±c§˜|ðÁjÆŒÀvØa ë¹"ÔžZt‚jÄÈQêÕk2sŽi4(â®=°ÁHQZ $ÀÆÿâ„ëbŽx€_x,`óò+Æ·ê³àa/+yÌñ²½ò¬6û™p÷ó¼Öò °N÷ 3QÂ+@€%Àf .¼TÃéãcÛ´iãÍ*¼ûî»+Ì\ÜÒù©ïýàGçšð’HÕ.Xlà¦6·v V?³ýÆÅPáe,ò˜Ï[Z?€Å~frµÏé¤ö#Àæöþæ‰QH À`3°Ò‰b¦#Ž:¶¬®ývÞUÍzt^&Ï))!mÇ!À` °‘öëY-Œ[%ÀÊxXl«¦ß¬ÿƒË 9T€K€Í4ìaR¦=öÚ[õÙn{5é¦Û2}.iˤêC€%À`sh]„?%¬X{<+žÃ¦U¼­æìÅ6'r2½¸Ø&Þ[y¦G±\O\ý=°áoîAЍ–KèËH¨m\C½Ë%À` °É›ÿihX®CXÎÕ©GòG÷="V÷E˜d ðDè/ÀÔP™ÄIÖE>ä7'63“<á”#å¥9¤˜›’»‘Õ )W€K€%À`ëÚ°Xlò–‚¦%IÃìý7H¨)ùš”ŽX ÀöÕ¥”@<+Ëè$ý¼¡€Q,¥è´'t,–ɼ"Ù³rÛåbË•Wï˜öñ °u¼yh*!°غÂKÚ:OÖ'þYm °XlòVƒ °È~¦avò%_£šf!Æ“ uò<+[McOØTMiÞ‡[‡;/¡C6Ê šÐñx˜|+pƒ>=®+üå5&ÌÑÉ6p À` °É~kyeWk˜¯&ñ©Å‹ú Ñi:6ù—Qq„ Û6.?)Q ™$%W"ÕhÊÇiDv„—„á%ÊŽeeÓp!À` °‘õaά³ËµWv¸þmç„ÏX+À∣‘òì•¥tòÚßE°Þ»ŒðÍ{Ä¥–ÓBÇ¥.Ë-ºX,Û@Âm€K€ÍÀ~<ñFõÏ=8§ç×ßàó–`¬iZR€µ`öá–ñ²ˆŒòÀ¢>ÓÛ¶mûô}ò yy>¯ˆ6ÊöɲjT€[£€Ü TP€^Âð’çŽ˜çææÞgßTï>Û)€lÒ6ßÚVõß}ÏÀsÙ©ß.ªC‡ÁôªsŒ?^õë×Ï)/2…Â.]º¨9sæ8• ãÒõ¦Îaê»Í6ÛÔµ½œ¸}ßÒäGÕB`öû¸¡ás¯»z‡††Eº]̉ -Ñe¬‰ œyºŒÏÏþÙùìÏ3ØŸ`ók£`ó{myfõW€^Æ:<ÌÄh¯q—48b–HÌ™ôqór¼e+^÷fÑÌKºåö{œÎëo¶Ù7œx0 æ`ûï¶›ºdÌ•NÇѦºõîÂì_Þx®¡á7ç74üDwÛðœF‘†ër±µ–õËuÖYg5=°n/ÓÖ`ëoÇUl\ʲ\* ½ i{˜çµ>æ"최cfdÄ’õ†èZ4ä¾õ1ô/]¡ºwïA€ é1°÷Þk-’t~_¿tùH¿tqM'l¾ùß&|1†t`ŠR(×¡Ï [ÆÁöˆÉXÀdQ—×X¶g#ç} lÒí=Éã`k¼R¼;6ŇU˼Ø„<°æBëµtŽØúÀW-׌û~qͰ_²{˜â4lØ6ܧï…g15—0f ­Ó5´žóäMQiR² °Ùí°QÝé+‡›¾kÂåGXž:>'Ý¢ð7¾mCá¨Ø†„ÅÇe;;—Í%l¿õΩeËC”g/vŽýÍcøer,»®f¹~Û°eÛç‚óÀoX,¬n.¸.úà˜æâíf9åucP–Óç-Ͳ¤Nò›Ÿâõ;_Ù¯’^(ºHûÍ6#z–«—ß¾a´aÞä ?,¶ž]g™u`×hh½KCëP½=êIšâ<Ý!ð¼Ê’X A_†„(¿>ÏÔpÏTlœ·I}Ë&ÀÖW=ß Ä °èä¾sÈ`/Þð 6;ÀjO$Þ[l °AšÈñp.È+0[I—󵽺¦¡c;òášáز]góM¿À±Ýnè ¨ÝŠl °Øúuu ZÇkOkÿzÖ!®cÇ °ö3¿Ü³ýú(¿9ðrÖ|Amþ/¿ã7ìkn󋜲˗ü²¿”'õ)yU®®R>Êó«g”Ïʶ-l\~R¢6%"'Õ@È Ú?_(;Àšßä?f‡g¬ 5ây3T:UìÌS@˜]F¹ŽG<†öörÀhæ÷;/Xˬݛ`ì×aVòVÒD<¾6¤—Óur9_¹&¨ƒ ÃvÙö9ÙkzÒ~Ís¯{”FËŠˆ °Xv¼ñ(7ÀJÔK¥ç¢¼0O­_”½ûÃ~Á}°¿ýÌ·=½¶­`FõHxÙi–g¾Ü–>Ǭ«ýÂÙ —޳?ˆ`ÉKñÜ^U—Ê RµtÜÑGlkQê °F*aÄQ¬@ŽKç“&€µ¡ÖÆZÖOèŽNÔôv×°¦q áÕ¶Ö4\ìsáܾ𛋿íñÎ.לy¢‡ÔJš` °´JâQ n€ zIˆÑ6°š/˜å4úñ† `–‹¼‘ßí!*øÝ/ZK¢¤¤.æ±ÄÖ0Ë4£ªì—²¾Ug_A€çžHC©Ø4\…üÔ›€h1;ˆZV<‰A“4UòÀú­m*õ´Ë5=–.É0X;õ’1²aÖE©»__n­× ó•±­¢µßÅ+<ÙZø…XÇi\°ìè —K€Í)‘®3©7ÀÚ°Šç¦€¤¼ zî‹Õîk]ÖŽ´²Û¬Ÿô‹2–WÆóÚÃX’z)J€M×½em°QªÉ²°)XéÜd¢$ùßÔìÐï ¯ ]fØBY‘ü DŸ¤Ó·À“>e¿9:yclϘëµ’Ò‰ËØT©W€•·Þå4e˸]sâ*3œÙÔ#è|M½P®m€28®xåq,bíë„s”ú6£ƒÍ8´$À`i‚Ä£@ÖoB'³ zî,ÍÉÃx`Ã74J¿$ßò’ØöðÆñ,4Ë$ÀÆsO¤¡Tl®B~ê@€­3Àš„„ŠÊ¸F|WX 5={fÇ…}eü ~G2UfD.×!ɘYÙׄF³\³ž(KB]Írm85Ë–I!üeH'.õ0.,ÀVÒÄöfÚ/Êét¾RGó˜×ݾŽö9I½$LØ`»^f¨XÜÆ˯’ 
°Øü˜‘œI_]Ji)œZJŒ`ñLöÔrQ6fX°«Vö‘™ûåū߄‚µDk¹ô¡åúæ8žÿØZZ~º÷%À¦ûúd­vØÖåaï²6«K9f”YËÌåö£®•έ–s°Ë­E“jëaåu¹Ž~ÆE9ƒ£šò]êÀ<ÕCj%í°ج 1×w .NLj`ñ2 eG0á%#úÙn†Ý†:b†ËÒyò‚Ø|QE´–9Èg?ðŽ»? ÀFq¤³ŒºZ¸Éù¡Q(@€MÀÆÝ9ä¹|Y2 ç—²xî2> FŠœ“=>7‹çÅ:¦°Ø(:ñ•‘€ÅóK†o˜3û–‹¨’ˆ%ó%cP±”+ûÊðê"ÛÅ\Ëp#{Æd‰#Àæèîâ©Pœ)@€%ÀÆ:ór’ "`gå±;ã$ëű`\˜çTnÜrÇbñx[ýt%À`sfOÔz:™X¹§åÅb¹u\1‚ßHÈoÏêk.@Ï~${"'Ù&åÚǶ£qìcá¸ö>R&êkïŸdt=°µÞFÜŸ C,67KøJ¾¨uíZ` °Å03œÏ2“ËgaíÏBSC¬óýÂŒT Ð ` °XF#„$ݰØB[kŸ<–}‘7¶8¢û¢Y—ɤ`Õ‡ÅP*¡X,áFÛ@Ú–a_ž‡¢°ux%ýâ.èxØ<ÜÊ<*¿X,á…FÛ@Ú–Ÿ©#`ëð ʤ·`3uϲ²T n ` °­à9ØK$Ý¥õxÐÅžL#­ue½ÒM€%ÀÖ­çOç °X†§óÞd­¨@ê À`[,f»õ›11Ï@„Y‘‚κTZ¼>hnO?T&y°ØÔYõ­–K€­ï=ëÑué›Äz^$Æë“Z¤8×@ˆIÒÀMúX4st‹¯§É#G}°ü޹~_%Ý¡OÑ?évX”ã`;wÞDÍ™3'0~úéjë­·Ì'eõíÛW577;åßtÓMÕøñãò"¼Ï¥¾È¦Îaê»ýö}3wöé»Ã­Ÿò ` °Q,l[ظü¤Df]Ϊ•’‹‘ƒjà…?_*Ph€È!™`Kr ¸ x©w}ü4 ª3·Óóê×ÞX½Æ{q²ç^S¿wÑÛ+0Ÿ”µÍ·¶U;ì°“Sþ­{m£vÝmw§¼›o±…S>Ô#LÃÔw=(À–î+¬“™A€%ÀF °ht´qn½d2`“Ñ™G)¦™2Š¢6à‡X4åb‘s„Ëêfà}#á7Ü·Þ9µ•N€: ÁÅvxlñö±X¼ºR&<ªæy l»üJõ‘}åx(×.ÛPwüŽí¨£¹ˆ;ê~Ú°á­ê!y‘߬ô±½ÔQ_–GàeÈW À:XlÔëÔð˜)°ÉèÌ£S¬émÄÖw…~á¶fX-þFB~3j†ç¢,@!Žƒ„¿Mx°õ3àýêƒ|²Êó;¦lGÝ‘ß&„ ¬Ú0Œ²¬æ¸WìO€Í\y=ãnX'ƒ‚K€%À:Ý*ÙÌD€Íæuc­³¡@aVÀÌže×ÙµÇÉÚIä7'D’üR.¶›ÞO»<ü_iŒ©]9¾9N×>¦Ÿ'Øô6›‹zVç*ÛÓV·ñÍò xlµµ¬“@€%À`n•lf"Àfóº±ÖÙP ðkª~cNMøƒwÕô –Ë/W3$Y<£a<švù¶÷Têoæó +63Ëpñ°Ö{.a¢6˜ ~Ô/é6@€u2zè\£rdjjjš:aâ…íÏ“nßQ6Bm€e¤Olú® k” Ûá¹z`ÑQX%,Øö¨¬l •o×°ÞK,áǵ­2ÛŠ´l²FöŒá#¼h¦li@€Mö^IòhØ$Õæ±Š¦@a†V%ø4q„Ë8W{]TünNzd† ü™!Äa|»ŽW¶=¸~!Äö$Mf˜°éEÝЖ«£‹‡6ì91?A‡m ßm€›¬)Ñ®]»c6Úh£%LÙÓ cÇN7'ÛZx´¤ À&¥4SD °2 ±Š‹I…æ8S€«=ÞU XBŒeB%s&bxn±/¶É¤K&\b{¥5YQ¶]ùÍœÄÉžJ€[&q2ÁÛC6ë(ç zpâ|ƒF )3xÇQvÒeâÅN5/ dVó¤ë›–ã`‹hNðœ©0 À²=D©zîe/«Ð p³—½‘Yxa„›<ÉŒÀ~“>a”%`i‘2 0¶ÛËÔ­³êWYÞGʳ—Ñ‘™Žå¸~ËèØç-y±¯9)•ß,Ìi1’YtÂu½ÆLÇ1ј¼Ü ÛÖâŒ\À‹µj :ì9Ô’Ÿ›qË€ÕÏ¢°maãò“°)¹9©Æd}hSü|¡@¡6ŒwQ–Ãq™ô©Ã/Š}ý&qª¶\s«jËà~é͸®K½6®ãÚ/­\t‹`ËMäæR¯¤ò`ibPÄ /%.yåò‚¤ì‚d¼:ØÖ°Ð+!À•–±1C‹MϤù{žŸZ ͨÖ^ò§–:qßâ@¬ßÄcö²O€0Y;YÚþ·=‹¸çäþHúÁ$ʶ×L6‡U~ÇÃqQ>òâÈcßãøÝãŽ}¤<¿çœ‹,ÊF>û\Íýru“óÂvꀲ$Úï¾i¹Ï°·”Xý,*@^JÙUãIÙÉxu°ØVÐã4`Å4bÓèöû½ž†¤iô×R?ã½–ò¸o1 Ö^ÒÉ-QmöUvh»¹]–¡²Û ÛQ"yt%4ÇCʳNj#„ÝÛÇ(7^¿W¿.+k2K=Lª› Ica_9>`OÖ––{Œ›qK‰ÕÏ¢䥔]5^”]ŒW‡K€-¼×9-F.ë‘O €04¯³=k·Æo‡ôÛ½åÚíù•ýÍ û6°ÊÌáòRËX{¶oÔ£\x1Ž  6_Ž™‘Au«‚ÌâŒ[!­«ß˜«³áÉÔSòR=Õ÷96/HÊ.HÆ«C€%À`µáMx¤qµñÚË= ” ˆš^Q3$_ /h‚3»þQ­™,3x£|À£€š[®,óð7¼¼v„H˜zÄuƒÊ¥ÖÉR¤;Ór2V€¼¬Q¢9xA•;÷#À` oX¶Û€„éÚ@é°¶WÕ„Vsíâ `’ñ즷ÕýÆ‹—Í0àÀ»kÎ$n[°×£vÑ'É<X'[j Î5Ç)'3Q`ÈKÁ%š£‡>ZßDȃåY,–ð#¼$i$óXéô"— !ÆMØViV_ ­ÅØtÛƒt½ý<°v",“Eù¬9îÖöÀºÖÉ`ƒê&ç‹1öÈ+“F…é ÍâÚN€u2£°N21“£XG¡˜ dQ,–K€eˆ± Ø“8™3ɤF&8Ù³ü–› Uš´È„<Ÿjÿf¯kŒú˜ž^@¢ –&, ü HʤJr,„û œ¬]Ê0ë&3#ãwX™‰Xf\ÆïÕ,ñ´šå`Ì#¬“LÌä¨ÖQ(f£YT€K€%¼Ä/IÇlì}7@lê†S–ô(@€MϵÈCM&ë“@›âç hÄÅhÄ*xî„ê¸ÛÆ·tðàœ\/ºè"õꫯª·Þz«Uš3gŽúö·¿]ÊÛ­[w5é¦Ûøü‹éùG€¥‰AW€¼”¸ä•È ’² ’ñê`[_@p1pqï,Ÿ€XÔ6€ñ¬C4½ªÔYg¥^{í5õöÛoWLS§NU½{÷.lï>Û+„U˸Λ›qK‰ÕÏ¢䥔]5^”]ŒW‡K€¥±JhgÈ`X¶âu=ŽõŒ|b\ë°aÃÔòåËÕ;ï¼*]ýõª[·n¥²öÙïÎXa› ÀfÜRbõ³¨y)eW$e$ãÕ!À` /ªqypX.=ÌÒ0ñ&`°b؃7!ÓI'©¥K—ªwß}·ê´jÕ*uþùç«:”@öØãNTe¶¿ÚÚÖÙRjrÎÉŒT€«™jØL]®ÔW–K€¥qJ€eÈ@¸Ž9JfS 0‡ªž}öYµzõêÈÒÊ•+= 6ÇåŒÅÕC,ÖÉ„’;åd&*¬y)X£Dsð‚$*wîF€%À^2/ô€UY×3_2æJÕ¹ó&%p=è ƒÔâÅ‹Õ{ïéepbJ(Ç—©“>þØq×òyQÅó‚ëdK Ô¹æ8åd&*¬y)X£Ds4é£õMôˆ¨kºì²Ët8sçÈùþ–ò‰³^6ÖÉ<"À:ÉÄLŽ `…ÊT6=È qnCÃ]:ÍaʤwékØA£#À`ix¦^²n´³þµ{«`¯»î:uß}÷©¨<Л¸éÃ?¬[³çœsNë™õÌÈœèÉÿš`¬¬“LÌä¨ÖQ¨LeÓð3Pƒ«bÊ®¸†4:,–K€eHi0öé§ŸV .T×\sêÑ£‡·æ+–¾ùóŸÿ\·„5g;73$cÂ)L<Å_Â,ÖÉZ!À:ÉÄLŽ `…ÊT6Ø…{í¥–ÝvS†4À5Ël,·®”ò4†k÷æQÃìiè°Ï<óŒç=묳Ôf›m¦ÆŽ«>ú裺¦yóæ©ýöÛ¯²ÝºuW“nºÏÓ–ç)Ö©¿Ž `µ4žÑ…™Œ.DTètmßörj1•3`#1uEÀ.úÞ÷ÔŠ+˜2¤®6¶[Š–m€m 5m À>÷ÜsjîܹꨣŽòÆÇb¶à5kÖÔ5ýö·¿UÛm÷Ÿ]¤Þ}¶WÓfÎNžõz‰C€uê³#Xmßö`tav£ [ìÛÑN-†L+‚›]h'ÀÆz³ÞЪ—ÇãfÏ;Èkÿ5 ØçŸ^-Y²D=ðÀjçwVƒV/½ô’úË_þR×tóÍ7·šèé ƒ«ù‹–öùJ€uê·#Ø'¿þuFf(ºÑ †}K€uºe²•©‡®nßZ«L€%À¶´!<$†ÔÚžr´a ,ÂHü0B©qØ6à °/¼ð‚Z¶l™š0a‚Ú|óÍÕÏ~ö3õÖ[o©?þ¸n kÕŽ=ZuèСä‘=ö¸Õ²¯î9K€u²"ا¾ñ Ff(ºÑ Ïéqýz`aÛFÂN—™‚hÖY0ñNM,¶¦”ß gXùÔ˜€eÔ…—ª#G¦agž­†Ÿ=20Ÿ”uÆOÏvÎ;Ì1/Žú™g¹—¢ÎaêËPIjX@ ʰ!~ì±Ç”,¼¯ðÈb|,@k·Öbqì7ß|SvÚi%ˆmllôîÕ"MôD€u2°Ψ‡ F 
°NŽ™’S€[ð<ââäZn6ŽD€Õã'L¼Qmß·¯ºè¢‹Ó7ºtQC† Ì'eµk×N ><0ÿñǯÖ_ýÀ|(wÇwT½{÷vÊ‹üaêìZ_œS·îÝÙ~8~6Ò6P `Ï×^Öö_ûš¢—Õ1öøƒÂìÀ˜µøàƒV»îº«š3gŽúë_ÿZ׸>äCJ Û©ó&jì¸k#Õ+è…@½¶` lÁí[¬Ó}’ÙLØ‚ßàØXïÝBSAFöØc›•ËgÛm{«É“'»dõò´oßÁ[ú#èóÔSO©Ž;eó¶Ÿp jèСNy‘)L]ë‹s"ÀÒto…Ýî°7Ýt“ÚFÏ>|’~Á3^O”4DÏþë°+W®T/¿ü²·~ì;ì ?üp…õ[ÿö·¿Õ5=ñÄj/=›¾~’{ißýÈýs—ëÔo` nß`î“Ìf"Àü'ÀÆzïæÞr1  °_²0–PêrÏÄ•ÇXxQï½·ê·Áj‰?MJ' ØW_}Õ×k¯½VuÑ—]v™úðÃÕ'Ÿ|R·ô÷¿ÿÝkظôKK¹X§~›[pû–ëtŸd6¶à786Ö{—ÛBLìK€%ÀÖ‚L€9s¦ê¤Ç~Ö¯aQðÈb<ê–[n©î½÷^L:ýóŸÿTÿùÏ&y"ÀÆÚŸe­ðMt…‡ÖZiYF‡“8eo®l­­?Ýû` °Q®›îÖž|í°ØV¡ÈXlZcZO:òHuÁzëyÞWØ}wÛ­b±x`°o¼ñ†7©ÒÂ… Õ AƒÔÞÚ£»hÑ"õé§ŸÆžþñxàŠæ'Ÿ|R͘1ƒ›|—û#`³®26ß·'–K€ï'À` °œˆ)5Ï{ ì¼yóT—öíÕê€]£¿Õ!ÅßÒK†\sÍ5Þ°2‰“ŒõØ·ß~[½ûî»êþûï÷&@;ùä“Õ{ï½§>ûì³ÈÀõßÿþ·—0¹ÔìÙ³Õ¬Y³°ñõc….™K€-ô â“Ï5ÀþßÿýŸ×¡šÉž¦yþçþGá;ê)¼³PCˆc½;Sc¸ÖÓëÃ1°[ÏöÇcéõö›ÄéÂóÎSÇhh…v•Në„1±‡ê¾²×]wŠ.»zõj/”÷Ê+¯TݺuSãÆSΨ õ¿ÿý¯ÂqߘÀ‰kVøÂ °ØÂß) × 0Ř˜vÚÉKÝõ²H\pA Vyäï7|ÇœÈq”E™ìh­÷À”¶õzT‹K,=°ôÀ¦æ9PnÌB hÅ$Nm¾úUuNKX±€ì¶z²&À"ƼVòÀ À¾ÿþûÞDO§žzªêÕ«—zðÁƪV›>ÿüs\.Œ>uÁ‚Xöµ‰ôéXlKCƒm —Ÿ”({€œš xÔ^ýõ±« •ðþÚuˆ<£*#b€ÕöOÚ?_(õž^(z`é­gûã±+{`Ÿyæuà 7¨½´ÇUf!~Ê)ÞÿZ|—~– Ôk9‡XÌLüÑGycb÷Ýw_oŒ,à÷_ÿú—spÅ>¯¼òŠZ²d‰zöÙg °}wÐÑÞ|Yœ„¡‘f€…“ÆL¦sFlÄÃ;L!Ee3f©œˆÇÀFÂKI´Ù¢#’ ¢oðsuJSãÆí¸™á‘•ºî¿ÿþ ëDâxb±€‹<Ç|)Êo.þ6ÏûK¹(åàa‚ü|ã÷4郺`c½Õ °ôÀÒKljžå<°€Â}ûõSCŒetn¾ùfÕûë_/-±ÓYÏXŒ‰Ÿ\=°Ø?ÿùÏ¥„YŠ{öì©FŽ©>øà8­”0A¼®(!Ì/¾ø"¶å^â2:±öÛ­ O3À¾…])+‘†·Ýv[+»Õ´c£¶Aa÷úsÔÇ©¦<lr÷I=ŽTH€ÅÍm‚-þ†§7ˆxLñ rc ƒŒ§µáÿãFÆvì#‹ý첪¹ãÚ‡ëm—õž^(z`é­gû㱃=°X,«ÓfuZ­‹µU·ÓžXŒÙ¦1|x(€|šéwÞñúG€,^ËdLæ·€+f2ÆLÇflëÙ» °±öÛ™XÛ™;4ɨ?ؾvâ²WÖK€Mî>©Ç‘ °€LxEZm€Å6ó n{œ¬ ÁW\qEÙCˆëѬSsL,=°ôÀÒ›šç@%ìsÏ=§Žþþ÷Õ=´Õ2:ýõVí…ò&xêºá†Î /k¹ôûßÿ^577«¾:,^]VÜ,ðÊb -f6&Àú/;E€uîã±lMŸ´{`ýàÑœëÛÍè?ü ›U¼µ2ÿ‹8[°¯8kL`0¯¯ØÅ°—e~™´,¶¦¦Ÿú °¸á$lØÏkÞ¸¼>lN …|Zì÷vˆ›ú{ Î ¦Æp­§ŠXz`ëÙþxl7,ë¹bMÕ^xA-[¶Ì[F³cFbxaè%wî¼óNo‚&sXYFÇœÄ * ³¿õÖ[¥ÿåwL …þt¸öì"\øã?öBޱ¶üšÉX§n{ Î¥'Õ®í“E€•ˆ@?ûTÀ‘âÄVVãÀ6Ó‹+ˆb3c;ÀI¢¥¬°^Ò8ó`kkûiß»kßœ.+yü–å!À–š9'qj}Ç`饖ØÔ<‚<°Ï?ÿ¼7ÎÔX¬‹Yˆe–âÃôdLåvì˜1j„ ÞR:fê¦ÇÒþô„ÖúyÎ=zôhå­%À–‡W¼!À:™ÖXÍŸµ}² °2Ï‹mŸšp+ðû^Y&M»Øo»äeqmí‹{W¯@“ÞµGõ»±g–&qÂ&{üjÀâ­“BlÞè6ÛÞÛ$Ç#„}›Å1°µ¶þŠû§Æp­§ŠXz`ëÙþxlwl9€8q¢¬½°Ÿi/l§¶m=ÈõóÀ=è u ÞÞm£<ˆ"áÿ_ûšçÕ•ßä{ñâÅÀš[,6‚ž¹Ð+7ù¬ê+!Àö·ß0;ÛÆ$ÀFÐRYDýH;ÀÊ mãoÎ@,qüö$NöMŠqØc_‘ßæÌn¸ñe'¼É’1±ÐWÆ›¸Â~Ã6ãÈO€ wßè‹fL –Xz`éMÍs ZìÒ¥KU×¼euNÖkÄ^zé¥kì%ç§~ÚjŒñ²Ç®¿¾®×\s÷}‘ž €‹0c3a)¬ ¶ !fq¸Þy­Ü…XÓcê °ö¼/å¼±Ø[%wO—iXY GƬ0ý Rf Æ à,·Ü9ˆeškÉÊ~2Þ|8stµ[×Úü·¡¡ÿÆë´J§0!I©1\ëé…¢–Øz¶?»v,f>ÿœsÔO5¼.IJp[lÑ `§NªÐ -ÆÊJmüÊW<øÝN¡ýÝï~§Þ}÷ÝRÂ:±Xó7üM€%ÀºöÑeò`íe"]ÖŽJôƒÔrKñÐ[c+åîõU ­‡Ç2oe–ñÀö×-ª¹ŠV•›1°Ú¢au²N«MƒŒ[9´Í°XBdøûfåª÷Ô²¯Gú¬Z,á¿â…ÅÒ:³fÍRo¼ñ†Z°`Ú¹sgPåY OìÕ_ýj hlõŒ:jýòËéHzæ™g<€5ÃߨêV_ƒš‡…UÑ÷§q—B¬8Mdr&{a€…3‘‚€_sMY±wííæ1ıÀM[”!'qJãm™²:`W´üž%ÈõØStó‚—±©Šf–Y€ÕOï&š5 Þ¥Óg&´`Ãß&°` °Øð÷д™³=£r§~»¨Q^ªæ/ZZ3ÌÖ°+W®T§è¥o~¡ÁpzÂG¨—_~Yí±õÖžWVž“?njRŸ{®:ñ¨£T7 d‘cioºé&P‘0ó1ŒpÌRl'.£¼ŒŽÖ´Ñxáº/^«è»ó¸KîCÓ8ñí70¢‘OìRüí.,ÃâÄ£jçÁÿ€U$3úPf:Æï²$OZl`loëˆÏ‰›}€½«¡áÝ,ôWà :5VÙD2°xS­ÃƒOWµ°Ú¿ÓÞ'À` °áïX@¬¤®Ýº«“N¦°­MkX¬ÙÚmƒ ¼5a»iPýî>û¨[4Ìšð:^ÏD, ŠIO<òHµ•ÞgM ä.Ñß½»v-å€}óÍ7•ê°ð|Cã7V¯©Jãj®KØ}öض÷_lhø¥ß WlÉzÉ=À¦ÓZl•–|‘v#Àf`ïÖE»††ßë6;´Æv›z€Õ†V/FëN~‰+´æ)ßõD,ŸŒ•¨aF€%À†5Йÿ3RMxµÿî ïåÃ$?äõã %ƒ)ËÛtOÖʀʔVs.:>¼¢qõµþö·÷i5o¹¶Ý¶·š>B}ÞskghÍKß„óh™ƒbtv=°ôÀª¹ºMiF‰ª=ÁÆå'% 4ëzÔnB€Í6Àâ?¨¡áݦG±)iÚ•«Pâ–¥qZÍ2dp lø±|Xl­F}÷¯°[j¸ÁÄNag)¶³l¯g>F‡cam ìÏGŒ(…cB§Óô죈¨”ºj8>V{m{o¶™úÍo~Ó*/¢#°¯¼òJÙ'ÀtðàTD: ÒÚðƒú ¢l×q¤âºàEÑ×tš±É&…ÇÍ‚ÇÕ®cÄØLضEª$¶ào¨¬YˆuZˆ•]{eÍ>,F–kÇÇBÜMõE7žó÷˜ °mÚ4zc^g=:¯êvaì”)ST{¦nÎ °×O˜ Õcoåy¸¤eB¦rá¿òûÀwV#Ï<Ó ¶ó>ùä“À"D¹\Š`1YÓ±Çè¥zOÜô']—¿hoú§ú:c¾‚¢ÀªÏ8ÞË#0Äé-¸}K€à.Jq™XLŽi¾‘Ì©Á³øv( uöYF§‹{Tw¢²”ÎÂr†–K€ ߥá53—ÑACpe{`·ÑQ@¨<ï*ì< vÖᲦëI믯°®+&sš4i’zíµ×ªJË €­ÔÆ °Ò6Çê ’vßs@h¯vœmû#=‘×§¬ÿ­gŸv…YŽ-YÜXl”!Ä)F¹bV-S+à*ë^í¿ÿþ ) ˜Õ:ø,î@,Öƒ-ìG =tÂlÅ«¬ 2tļó§jOIœFQÒe3„˜!ÄI·¹<áÁQO(dìØ±cÕ¡ÆLÂúᦶГ=þøãjÙ²e꥗^RøÃÔc=¦úuêä…ãy8eÝuÕ–]º¨z<«w¢ìß¿âÖJã[çÎë,ŽS)Å=‰Ú ¼ÛШ/w\mÏ«Çê%À`’5Y+٤؎%®ÂØ­Çë¡~ëdž)#é¼ôÀ:Ûª™Ì˜€Å 
€µo¿…›“¾I²|¼2›ÉÆW¥uˆq:a‘x¬6ôÂiXl˜ö¼áî¯0zùMâd{aRÜS‡¯Þ|óÍÀ.Z´Hí¾ùæ%Oí*„ wþyµ¥†ZŒÛÜN{agΜYqk¹1®°¿ÿýïU¥”ÀBK¼8À„NQx¼Ã\›0y¿³Í·>ÖÑD¿5Ì °%K Iÿ5¤V»@ëÜc®nãO}ã¡€¯Z»ðúë¯/EÂæ…“& 8Â&ŠhÀv±]A¨ œGÕžW=ö#ÀÖÚúÓ½f7\PÈ0Þ!Ÿ$ófÃÍŠeÈvÛ{ HöÛfÛ~@ è_xá…ÀD€ýÂ;o¬Ùƒá%«N½bíÕ Vx½Ö´å` Šö+’í‘ÿM€…íèg7›XØŸ~åa?ów`+Ù¥Þv™bÇŠýÖV­&?=°ù¾Ys°~Z34Âï­”yCŠgÖ¾épÓ¸¬éa•²ÌeÈ4es;ö—i©«_]ª¹‰+íC€õ'À`[ÍùÔ¾}o ‘ òpâøBió ©•Î­À^yå•´Ê$NãÇW[n¼±:g½õ«à…®û¥!ûíçAêâÅ‹ÕY§žªzꙇ¯×pÚS‡6í¾gâĉÞLʸ›†àùó燂XXx‚\êº`Á…Ù‹á½={¶š5k–š1c†×¯"%¡e=QΪwbf'ê °v´  Àâ0'r²Ç¹šXÓNäš‘ˆ¶ÇÛØvÔàxæ1% RʨM\å˜,l[ظü¤DL¬`¥1°æ )7,Àb?ÜÀò–É ÷ ! °~Rù…€Dªåʉ`õ ü´)~¾P ÷†”‹Çb†»´æ‰ßã,{ùå—+„ÿ.\¸P=óÌ3 PxÕUW©Æÿ÷ÿJ+³cÖ#9DÝø¿ÿë;K0¼¢]õ¤O˜¡øtO:öØÀ…ÍÙ†÷»ßyF3&…rIØò!Äìx£W  ÛA@÷† †.kqƒígÃf9€œÚ@+Ù¹)!ÄÞ¬ftÜvmL /EßB‹[b$Dßàçê0Wè´Ãj%DAÂì7Frùx`Ë…dØ7žý0°·ûË|(È›©rÓ˜gØK€mý,!ÀÒK,=°©yqÔ±%/å‘G©°Ï=÷œú–žiU<°æ2:AKÜ4ÿð‡^1 0 ¨5÷ÁLÆåÊ€…Ç×%` °IšìõX±akÚs T°¶MY`aË,È6ÈÙ¹Èo†ËßRÿzL±6^J²íæýX‘\$Vbèåm”ÌÐ+žLÜtxkд"dÂôr,ö‘}m¬L…cÊr>æ® Ûo­Üܲ¦­y °¹¹íRc¸ÖÓ»E,=°õl<ö—žÝ•«ÞSÇR bÛéÐßsÎ9ÇóÀ`§L™¢Æ^v™B,‹¥t‚Òoû[µÕúë{ceÏÔãfGž}v«}¶Ð`†íj†!WòÀÚQŒb›¬} ”Snã¸ìY»\z`kmýéÞ?s›TÃ/Êq°±Þ ©3\ëᢖØz´;3xL픩3Ô7ºt-ì>ûì£{챪†îI-KêŒÑÞØýûõó&[´`èm—]tÑZ+‹¼a'qúbX~Ê*}4ÖöIÀÊ8U Ù•hC`íÙ€ÍÙ„%"Ðobs G™tÉœ…¸ÒX™Ê %®©˜„M€­­í§}oì µV2©â$|ÑŽA€­µõWÜŸK,=°ôÀ¦þ9pɘ+UcãzÈ~Ue=å”S¼p^—µY%òwhÓÆ›¡ÄÓÖ]Wm©ÁõöÛoW]ôLÆðÎb|¬]îÃ?ì…cöâ0‰K€ è½3 °ˆÀ³£õl»P «DÚùål·#e˜š”iûÚQ…È‹ßíy[üfÆ1ºö6¿ýã¶· °±Ú·u/¼Q× ©ÖZ`WÄ6yUÜ78¶ÖÖO€ òtÑKlPáö`oiÜ-[ñº2'yj¯—Í;vl(ˆy晪«^J:dN¼³gÛFÿ¿ªe|ì9Ú¸5ÁXFy˜D€%Àæ`ã¶ùŠT>6Vû6…` °--™³·¾¥Sïy‰Û(FùXlíŒÇˆ‚›·Hí°c¿RXñV[m¥î¹çµtéR§„äc~ðÕU‡ Èê0/h‘ð;òHy=ôç†M\66dLg‘Il‘3îs%ÀÆtgå©X,–ë{G` ° !fq&Ÿ“o¿WuìØ©²ßýîw=ðÄ$O.ÉÙëµaÅHW¯³Ž¸RƬY³<€;wnèD€Í“%ù¹`uèoܘæò °‘ßSù+›Ý‡Cˆc½3i¸FíÉ¢–بÛË‹ÆÛ¤ã«×¨ágôÆÅb|l£žˆéŒ3ÎðÖŽÅx8—„u^;ê°b„`á…í§ÇÄÞu×]Þþ>ø °€Ñj&Ϙ1£ÛAç•õí}úrb‡ž›K€UsõóF3Êh‡ö”¥Yg@”!?yR€K€¥–ØrF!–›u`(zý1>vÐÁ‡–qã7V×]w·ìŽKÂ$N°€Øé:í«g)ƾXC m5‰›'k2Òs!À` °‘ÞR9,ŒK€%À`+ìö}ûª‹ôAé]º¨!C†æ“rÚé b†˜ëC¯¯ÇÞÛwÜqGÕ»wo§¼È¦Î®õÅ9uÓF}ÑÁ‰çŸŒ§ÕUçYÎSÛ|«w dq¯Üwß} k¾VJ&Àb6â>Ú#ûS}ObŸ™3gz Om5IVÖħØõ|²šX'CšK€%À:Ý*ÎD€%À` °åŒÁ•«ÞS#FŽrJ?>î'jØOG8åE™˜5Õµl×¼g ?[ý¸ù'Î冩³kpNƒ˜U›õNxF}=&Ýt›êС©²‡~¸þ»xñbß$Ëè£ÁµoêÎ;ï,倈V“î¾ûnuè¡_z‡ûí¼KîﬓÁM€%À`n•g"À` °بd–—oâõÍöõÅ‹©ÓÎÞj|ì¹çžë­ój§.z=Ø.:Zâ¢óÏ_kƮ‹u"Ã$€ïqǧկ+ÆçâÅÆíæ½m` n,–ët«d3S/]íAµVK€miC§èïþµ¶§íŸ{C*¬¡8qÒ-Ԥ̬¼¯½õzzñ2êÃY‹3Õ/]¡öÙ7¶[·nê†nP‹-*¥k®¹Æßjþ&?ðÀÀb9×t饗ªM7Ý´tÌ!ßÿ‘Â8ݰϣ¬æ'À:Y Xl” Û6.?)Q Y×£æYµ°Ø”´ç´U£0•‹!øè Ôæ[lYXM¾sÈ`uùãËžÿy?¿XíÚ÷ÂêãÒ†˜'½^Ûi3g{÷7¼¡H»í¶›š>}º7cq¥„<XÌ$”~õ«_©]vÙ¥t „ c\nÑÚÖ©«'À`£X§FÇLÉ)@€-ø ñ2:ɵÜl©p†U%C²è{äÑÍêÖ;§`éaÍõsaì¸kUÛ¶m=È\G¯ùzÔQG©yóæ©… ú¦iÓ¦y‹àr Kî`r7ãN;+Ìn^4p•ó%À:؂۷¯ëÔè˜)9"عºÃbʦð¢'×ì s¤ÂX~†eÑ6ÈØ¦6½ÞÅ kÇí­¯ÆÇ{üI%àÄ8ÕK.¹D-X°`­tÿý÷{‹±°~éÔSOm5ÎõŒá#Ê/²æX'"R€}òë_WËn»)CšÑN-†™2¥@TÛ¨Áõaæ0eRƒ‡5Àn’©–›ÊÚȲ L`á‘4Cf±ÿ# é´aÃ=ý0.ÿÛãCñöA„ç–ón ‘Pž”±¸È/ÿc³¾¨›ß6ì'¿›ß.Æ´Bl–…ó¶õp)“y½inó-U»í±W d·Øb uã7ª§Ÿ~º”° ¡ÄfÂ8מ={–ö=èàÁ ãmÓ|¾IÕëdD °tÎdÓ9ƒë¦í[¬Ó-“­L‘l¶N™µ¥‰)@cË5V`R†€P&y„F€:3Š¿h‘WàO Fì Hõ3 ñ»¹]Ž2ÅR/9–”: <ŒÍ:Úu2^q<³Ž(KމoüÏ1°Ò v”ÅíS¦ÎÐë%w-Áè^{íå… ÏŸ?_M:ÕX„#Mš4Ií¾û½ûl¯0¾6‹çW °N}x“ÎId™† ÉtÎdÒ9ã9Õ4ÀbÂZ~r¦6g”§“*htù¬Àš ¯~0Р“}Ä „7ÓÚ–¯r[ hÕ­¤ä½E™& ˱ì2ýB M/p±jlÐù•Åíݬµ,o3êÂKÕzë­çÝmÚ´Q'Ÿ|²š2eаçŠõdñ;¶cÙKÆ\Ég¨Ï˜qlªúyV† P:(@€­ƒè5h§0‹cʱý<°fˆs/¬=‰“y¾¢'q"Ä&A /HÊ.HÆ«C€m} °ÀÂsi)þ7=šW*¡Â2C¯ihÛÐ+ÛPN¹™Jñ»]–ßq%¤pŒíâéX–ewÌú†ñš¢v1•,„rýΙ ‘]Ðàµãµ‹£ `3n)±úYT€¼”²«Æ ’² ’ñê` °±B»¹ôL†a¥2ýÆÒš3%']pÄ6PÌ6@€Í¸¥ÄêgQòRÊ®/HÊ.HÆ«C€%Àư~ãF“4àåøú‹o+žXs›ü-3)'YW«˜`Ãë^ŒëN€u²”u®þN9™‰ +@^ Ö(Ѽ ‰Êûƒ` °±,BlÃÌú—1:H’c`)ówûï¸êÂr‹,¼Î¼Îf À:ÙRu®9N9™‰ +@^ Ö(ÑCõÑnHôˆSX'3Šë$39*@€u*ÉlMIŒÇʵX,Ö˜¸ŠÆ6‹m€m ê6@€u²£°N21“£XG¡˜ dQ,–K€e`HUÀøu{t*íµ¡£†Î(Ë#À:™GX'™˜ÉQ¬£PÌF²¨–›*Ã5J£‘eÑ“Æ6Í6€å©¨•®6‹&GÅ:`swIëzBغÊσSx À` °ô¾± ° d® `ã5êP:¶¢çøØ”]ܺ>ª†„ ZéÃò‹§: ~¾P sF=LÙô0ñºçºaÉ&Ìz-ëc™&ü/mà…å«”¹¬“:‹ü˜Ñ[òÈvx)±ô“]ʽõΩÞ6$ìg¶7û–kƒa‡úc© þ—²åœÍåªP· ö:ÚËY™eÉYi˜å<è\°!ÄN&F/ë³ìÛÑGiÒÛ×°ü²*åQŸ 6áÔ0™‰ P*v 
+c…yŠ'¼Ö¼ÖAmÀI—€žìÀœaàžÅ‰“n)=‹ð?ötâwù€'åa»”'eàÛ|M¨¬žöxÉ8ê'€*uÅÿR'œ'`ŒßÍðeÀ« ¡•^ôØùÁ*6 ]*ëH¨ȧ;%xZÓô±NOðx¦-\͹s*{üÖ~ce‡ýb»®xi!T¹7XÈ/ñîš¿a_¹ñ%|ßâÁ•‹À  rß½Ìá”ãu Çm ˜máÊ©²¶,¶)Ïr­pa±_M;2Ùÿ»DÚ&ÀÊßv"Ê…}‹oÓ!#ÑaˆíüP*PAj7­Ü\æMï °ò`0ß@ù¬À,¶XM˜ç…°RLXáuçugðoôÀËh9[¤€¢xd± v¬À¥í9Ø´AÑ„vD ”oª=Üö®ß&'¶8—™,d3åIW£@5+¡½q0½µ2nÕ¬‡<0d¼ªxQ¥ y@`{Ðzz`«¹ÂÙÛ‡K€e```ˆ´ `³g DPcQ»S Ò ÛÕœìÉÕc‡ ÀâöäQò›e›Wʰ!:‚SgT ß T°PÄ/ÌA`Õ|ƒ$ã DEBí ûö1Ç`óÝåì"5Zè GŠm€m€m€m€[ Âç,%RPÂw‘ÅtšÀ¶4íÖZÖ¶mq~Ôå¯\õžjjÚpî#›rÐOò¨ T€ P”)9ã(jc‹ååœxmëmám5a°†ÿõsÐûF’<ö˜OH/< *ûØQäðÉöÓ† oõlCþ[ïœZöy‡íæ¾&ÀÚÀ-u‘ãI½Íúaüo¶AxYíúËqEó¼pqz~ãº?æ/ZªÚ·oÿNÊú:V‡ P*@¨ȃë­·ÞÇËV¼Nˆå ¤ll±´:–ä7€¡À`ЄNs?äà!Bj‘ð·vk#&‹²½—Øxöƒ6ñ€JÙ€FO&r²VÀÞQÉ#ã|í}äüpnRÔûbŽ)Û°¯2\Î3xFU.´Øh£–ä¡ä9P*@¨ )S ©©éÍ[‹á•1ÄrêïEã5à5¨¶ øA˜_1€ÎôÔ"h%¿9‹±xrQ/?H(”zWKŠãÚ¡º•<°6<ãö8_ü†|Í~Y©[9PÍâäW8§ oÔ!ÄMSSÖݱ:T€ P*RÌ|&Ów‡Ü55ÙQ™Á-5•bEjS ÃFÍštÓmXzߨØbi®+“‰ÇRB‹D~Àkþ†cÀc*á¹åÂŒËA¸ßx× €µ½¹fø²Y`Ë-ÍS`³0VØOÓSOÿég?¯­wâÞT€ P*Po0}÷ÀzW"äñ¬X`ZÀõÇyð“/šùÁߪõ®p?zæØØ*µÓS4‰¼®ðdÚU?3óÔjkO2…zV°ö¸[S—J“B•«¿í•Îʽ¶é¦›}¬»É^ùê*y6T€ Pb)ð³n΂²–—9­9Î#k ž­ëYǦõ×_ÿ“7V¯‰Åû’ƒ‹õ$„± ÄÓ‚¼§~cc²¦‡SÊ0=˜]™àHBmÍcûzÚD™Ûí`ûx~c`m¬²lßãkn/°µ‚y=Úôâ¥+TÛ¶í>¨g§Æc;)Àåeœd ‰ÑŠ¡%ãiUà&]1¤<|òt.y¸‘œ&Û0—¸¨‡ÑÃcÆÔ•ºÖ» Ø¡Á¨üæ7nTÂoÍ1«2‰¶±Ÿ~øµZš€(cN~ÈcBf¥Iœ0;1ÊCÙ¢a=°rNß86þè–cH½PWÙo²y|¹fâ‘®÷5 sü‘çþ|ƒ 6IçÄBâRàh]0#êâQNj¶,5aàµÄÃÂüÀ«9­¥‘ã;hñf€#n3¹ŒGÅ$ÇÁ¾ƒJà˜(׬ B†Íö5ƒóÈ¢79áKž¹Ã Ä¢óaŒæ%± ° ¸¶¿I’t€9{ò$@›½&ªx@áEèõ{醲úìr”•^Ô¡>2q<¹Ø_<ºø6aÛüÊÄŠ× l/£ƒ}¤þö’>(ÛÌß+Müäª}’ùÉÓ¡© áÛd®,V…až^¬S®úlÍ¡t®…0ZÑU)æKµhÈf¨€¿ááßñÿË©À«™×ÑJ'‡ Óï8ø ÇÀ85!Ö®»ì“jÁY¹ð l¸á†OL¾ý^†s"¶¶ÈÛ€x#]`Êv³:™‘Ëù–Ëc¯Ÿ[KYIí{Ò©Ã>éСÃèð=÷HP±A]œ V+µ‡²í`—Š2ZÑE%æIµ|æƒâJ]cx6ÍO¥·aöÍSË›3³,©› ר›éa-ß;‘êfWUåúvë¾ù_’2txzïØŠÕ¦•–²A{±¬v¾",¼±•&…JÛýƒõÄ7ؠßuïÓXUÄ’R \$œågGãÙuƒÅŽ „ýèò=,Ž;Ñ,ÛúE Š3ÇF}ìáze(çfûÙõ–<8ÊDx¿œ«-iFFÚN%F+º´æIµ~KÜCóæÇÿå&°bpxØÐÜH(É`Mñ°¨°È[ÍÛ¨T_$Vî ôº}çý¸ù„Óf±>Å^ï|^ïJ“(É5G¨n¹¥fÊýž×öâ¢WZΡÃÛö›ú³?M½{fEå7جH°;Ë©Eˆ¼.Cáäxš(@ `Q¢a³âØ¥øˆ=-ÃáÄá"uÅþ°OMˆ;¿a|›6®l7µ0m\±›q,ÓF7ÏAŽkB,£S°‚A øy`åf“…|—['Vn8ß0BÉM&aĵ¬<¸è ºòÝÞ±c§›/¼ḩi1ŒX| ¯+¯+Û@~ÚÀ~ûú¨]»vÇd´Û+Zµý<•° Mï" Ñ=S#ñfÊoa`­’gҚͺúÙÓ¨£ ÚvÝÊ«ÌK°8GÛiã§YI[¹hwUNÏ×nüP;„¸Ò©cÜ ¸yÃŒYð{¨ø¬ Îö›9»îaT9½œù?­¯wì¸hâ“ÿE3?&¯%¯%ÛÛ@mQ;›l¶Ù5ùïss†€6Ó›»Î “¨+°6ÀÙÐXI¨Jyý¢ûÌü~ö§}.â1•:ØÛñ»ù[5+Ç}Ź„oóÃhÅÜÜ2Å=4bsbÜüxXà&ï+þ–·_2©“y¢ ¹Y$¦ÛýÖj•ý°MÂ1dÌþï©ùÐBä͘YWûd\1ÚqãÆ:=rÒɧsL,'ô‰|BŸ8Œh–I8cH¶ ¬\õžÚsÀÀ?cèI1ºÅÜœ¥í•0´£ËEÚ‰wÑ]ר¼´¬¬ÒQ ™ òf 7n@y[d{tÔ)yÃ,n^œ?ÅT ¹mÛvÀx¹dÌ•Š0›l¸Ã#©7ÛÛ@=Ú uøÙ#?Ç2k:\ø-†|1{Á|œµ =³çT‘è>‰ 4gÕd ¤JÔž„Ð ¬Év;ŠÐV ÇGyrØ–òÛì@ÙÏ,u¶áÙŽ`ˆË¹ùÍ–lÎfŒº˜‘Š8–œ/àV43ÏÁ¯®ŒVÌǽ³РàA`Þ¤aDÁ #ðŠïj˱‰7Iv¸F¥zá¸öZWa΃yó¡ÀÀ¶mÛ^˜ml\ïZ¤!ß?üS}zcÍD ØØØ2Ú¶ì¹õ§ò\Çóкîºm.Ó÷ÍGƳРÆLaQdHš¹ßoaÊŒ+o½êÅhŸ®(Ë¥T€ D @SË ¼Ü8E'.`¨,‚ P*P'zèã6Ïõ:Uƒ‡Yñ*VsñfŠcEþwC[Í1«Ý§ {ˆÑŠÕ^1îG¨ T€ P*@¨ðQ Lž½;X!¼a–xLòb nÊ$?8£“TœÇ¢T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@²¥f²Ä¬‘X£Úõƒüi\¾Áµþẏë±SOŒçD¨ T€ P*@¨@¶˜Ø)ª%°,ƒÒÉuyÔù±ÖŸëeO É([Î9 ,Ë>Aú L€¸,K1Ðõd¬|r¼J»'—¸\±Ê‹Èݨ T€ P*@¨ˆFF@£™+€–jÁKj—À¢®¨»í!Ø•Mû¼]ÎSÎ¥`£èfë‰ÿQOW‡~f+=Ê. °ÑÜo,… P*@¨ T€ P0=žâíÂö7µ@À(Œ‡Ò®J+žNûØá~òÈy üƒôP,ŒÐMÀú™^mè ÇÂ6—]ÇrðK€uQ“y¨ T€ P*@¨ȼ•Bvb]ËOŒ$¶ÜEpXxÅ»X),Xàß~Àˆ}P‘Çï­%Ï@‡–#×úË1ýÆ`Äd*@¨ T€ P*@²¯@%€d• —…Wp ¸BÂß~h,¼¥È¥N~ g×yû`_¿Ïà–ãË6ñÈ ØÉ8Tsó8ø=hœ®@.ÒQG”û¢‡‹ÇWê(:—+?`ECeáºVú˜šË5ªBŒëŽrQ~¹ëpHn¦T€ P*@¨ T€ +P `ìl(¨È˜N'ù߆#6”!$^H? 3ëc§L˘S‹ýÌcHMh´Ï»âœÌºúåµëPNy€¿htuL€5ÁÙé.WwÓãk†?ãøåZÎü®È[`%4Z¼Ý¢{¥1»AçÍíT€ P*@¨ T€ P_Ê, ­\¸+àÄöžJˆ­ .°Mâ¥5Ë7Ë’ú¬ c?+¨ËÁ£k1„‘pi?¯°„ðJÝý€1È jŠïš×Xì_Î{ëWžÒlzžMá-5?~!ã2íÝð|Ëu2C›åŇ‹—™·% T€ P*@¨ TÀYs¢ s<¨ ÅõãR•Bfzý<£(Ëï)P†mæ' €Øc6tÉ1ÍߣX¿Y“mm€Åv9WÔW>•Bš¡¿ý)§c'×Owüæ:.Û å†\ÛóQ*@¨ T€ P*@JKµˆ·RÀ€UnÌi9ÙüÂL`]AªRH3Ž+^a3Œ6 €•²m “ú›žÙ¨6hvg?€5½ž•V4)w [³ Ýý®m%·Œ –°nÞjT€ P*@¨ T€ Pšðsìd9,àV Æ/i{é*¬ r". 
e{/£X©«Zë «®aÁ~ç\î"ú,òÊØ`yÁàZGó8¶fâ ÷{Ñ€ýl€5C½åøö·‹—¹æÌ¨ T€ P*@¨(Ž•ÆÀMš$³Ú¢ ¤°Ø´¬Z[êÊ£ °™!ÛA­¬ÀÔ;Jˆw¹Y‘ƒ€:*€E9rýý¾ƒÎ‘Û© T€ P*@¨ Î Tòxú·xò[.&,ÀúÍÈä03ÇVFå…hâݼÊùØã8ýàÐo$¿‹ ùì ”üò–Xä5Ëñ«k±„é^.„Øö¸;78ž@Ê2¡O Í/ô¶ÀŠçPŽgžc%€­ä5Fˬ@¥ ÀæùJ=±ÝbƒÇ3Ëô»¶²]ô“o¿õnÍòÌk„kúúy³íubåÜñM€åó† P*@¨ T€ PÈ(UZÒÛí°RäÔ˜Þ=ä±Ãgíß¶ìS præñä8€¨rkŠ"O¹°]ñúí}Ê•´Íe ÑÇ–ü8<ÀeÐRE~ÚÛ ÀåúI]pü \#S3¿kk–#ûà8ø›ë¿©ÌíT€ P*@¨ T€ PŒ( Þj?¯fFNÕ¤T€ P*@¨ T€ P")PnÝ"iÀs¥T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨ T€ P*@¨@±øÿw.G?ÓæIEND®B`‚nova-13.1.4/doc/source/images/rpc/rabt.png0000664000567000056710000012764413064447140021461 0ustar jenkinsjenkins00000000000000‰PNG  IHDR°š6¯»sRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÊ&ó?¯ IDATx^í½ ¸U™ÿŸÿ H ›dWV‡‘5ƒ"Š‘‘AЈ#‚,FAAq (ÃOˆ"£ˆ@•%È&‚D–@v1 (*:êàŒ3sþõmî{9÷¤ªëTuuuu÷§Ÿç<÷vÕ©sN}ë­®÷SïYÆãƒ(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(€(0d LLÎwÚH:.ù;+Ioó¶Ù>þ¾¢Z 6€ `åm`ÿ‘g͡޳fȽœ.      @ÖM2:q•Uæ¿ì_7ßêM/*}äÈ™Ûa§]F¿Ûvþ¾¬ °lèÜvßs¯?sìñnÿú‹éùêe–ùÏ +¯|mòlÜêÅ*@@@·ÉJ+­tÓJ+¯ü'9_z¥{þ÷/‘ÐÀ°l §6ðèâçÝYç\à·¯yÍkþV]uõï¼oÆÿˆ#ˆ#ˆ `Ø6Ðï6 ±[OÙöw«®ºêG†ï‰Î£    À€+°Újk|íÈó§~wXh?N76€ `Ø€o;î<íw&LØwÀ㜠    É„½u··ý§§À°l`Ðl@<­·þ†¿Mžê[ Ï“½§g:=©}VɤµåÇç´žòÛ „>=5*G¨C­ô`×~МÎGÀ°l@6°pÑ#n­×½î©ä¡Ê2;Ý÷,¦•„WAï̈æQ~{‘Ð'ˆȂ(ÐÇ $“6ýâÆy €WÖsİlhÐ’pZ×¼Ù4PP†[e—}Í‘ûíÿÞÿ BA„À°l`l`ò:ëý!yòo5ÜOÎPPúSñË/?áw÷?òä@¿q‡Œs<°lˆ³‹/½ÒM\e•ùýùئÕ(€(€(0Ä Œ?þ˜<äÏ8=qN:¡6€ `ƒaZëO9~‹4I¶IWƒ¶ @• ,õªWýí©ç^`™yÀ°l`(màÚæ¹ '>\å³uHË`‡ôÂ7ô´؆^š…+@–¨EÙ¨Ça;Ø606 eä4œ¦ã*°Ø@“`›t5h T©“8ဂÊ9`ÇØ6PÖŽ9öx—Wl;‡(0¨ °ÀG¯ZÔ Öe, ÀvÕ›Y7)}qÅ5ôÀ>ñô n½õ7Mú­ñ¿ëÿ*çÎ<ë¼Vùæ*ËíeYlÅwÅ¡À +À½|`Q7öW§ °,ÛUfh6´«í¦n?pÒnÿV°]½7)K€èöC‰ò±±¦Ø À°]õaØ‘ñªy«ªº+…Ý‹õýü‹.mEV-Ÿþ†¶›Ö…ØòŸ|Êl§nÆMùíiG¯v«‘ŠÕ9&)»ÏôÈr²ê¢üö×aÐõ™™ØŸXà"æÁBìdl€`Ø®ºElÀ nÕý÷#GÌtoÇÞ-ŽÑÿf›SåñS˜G]‡µÍïBlåpàŒÖ±Uw[îö3  ;-)3wL¶2ÝÜÕÛ‚ÂQ ^YIV%>l_½ìö‚òÑa¶€`»ê°9+¸ ÁRÑRm³ˆ©V¿U~ÔUÛü®W«n3›aï´–C$ °ÀÀ0?0ñÜO<éÔÖ’ ¤t òîå·Þz«[uÕUc³Î_¦ŽÙ³g»)S¦D·©h~\ô˜/ùËn©¥–ÂÞ¸çºfïx×»ûnâA|¶tûœz°Ó’Šo†P ! ¼,Ûí‡å×kcŠV±Hלl>ûØ9sæ¸ý8¨¯C÷Kbòø4GÆÔù2~Ïê}†ôê™ À6çæ£%½S`zRõŒÞUß?5°Í|0\{ýì/Ç•W€-æèT O=÷¢;þs_p‚Tƒ×iÓ¦¹›nºÉ}öÙníµ×ݾáF»‹/½’‡iMÓª¯u]å°,[ìw€-ïCT|äº)åÕ °ŸúÌ n½õ7“¶›º½{âéxövùÙ;(; ppµëœì¬Š).B¶˜£S%„̹ð7yò:£€ºÁ¸ .¸Àýò—¿M?þ¸;î¸ãÀ0šo×·ìî.z„‡i—¦U^ë:Ë`X¶Øï:á,ô.Km{ò)³[ÏYýõ³?rÄÌV4¶Îßña¬k öÖµÖr<òi€5¸cÛm[Qv¶7O¶˜£SÅCE“3 B-â*8¤>ýôÓ™é§?ý©{×»Þ5zÌøñãÝ‘3q.~ž‡* ;ÆX€-ö»ÀVæLLJR7à*?µ¬"¯8#ê™*ÈU´Vï{xñ˜cοèR7÷ªë[Û•G)Ì#_ByÒö«Líóý •©dÛÔµÙ@ÛÚâcÛÒêÕ6ÛæYç-q¾Ú§ˆ³êËj{¾PX;À°7ˆ0ÀVù;_¼,¶˜£Óɶ`óÃŽ3¾õ ƒr÷Þ{¯{æ™g¢ÒUW]å¶ÞzëÑ2&­¶šûÒigD=p;i;ÇÖg'j À°l±û€-î;d¡.À‹++íå‚jXEXP!8†¿ÇÊ'ÐU·bEfõWß}¸Ô6°å³.ÉþxZ몬¿Êûöwì=ú×ñÚî×­mJ¶Mûm›ýµèqX¯±Ö~Õé—á×¥ãunVN‘îô•u< ÀöUÄ€­ø§¾`ql1G§ì·&aòǹn¿ýöî†np¿úÕ¯J¥¯}íknÒ¤I£ «™‹5ƒqÙöq\=vP‡Î, À»ŸØ‚ŽCvö¾XAš€--b‚d¥Õw. 
jýnÇ…V–¾‡jûbVÀçGPU¦¶L+ŠÖcpmuYR ¸ë÷ À°le¿Åƒ_[ÌÑ) z›»Ùæ[¾1M SðY\ýã~øawÄG¸e–Yf´üéûì˲;CÞ¥€`Øb¿ële¾N߬EDÛ=ãAM‹Òê9¯ía £¥*3ܦˆ«¶¥E|cV Ù.Jkõ( ÎuŒ ×fYÖßjÛÁuQ¨H~€`Ç›‘Ü3+ûIà‚ØbŽNì±&YÚs¯½GÁRyôÑG»Å‹»gŸ}¶Ò´`Á·çž{¾RW2>V3k†ãØö’¯;vÐ ]X€-v?°•99} °‚:T»Éš¬›q˜'ܰz6øÝŒÃ.È1]ˆ‹¬µ3ìzlÝ¡c¢ÃÝ|¦u`§%eæŽÉV¦džj>Zûõ–ĘÄið'°êRâY‰%*ñÉQ€-æèäýx 5¹’&Yù1vÓ§Ow÷ÝwŸ{î¹çºš.¾øb·á†ŽÖ«Ž5Óq^›Ù_­ ôZO€`‹ÝÓle®R߬E'³ºõêw= `Eõ»Ç¬ÊT½²~bAm]M‹gÕkçd伕(«`Öœ<ë`‰ü–Šü°y·Vw÷°ÅvprÖ98MªdàºÙf›¹¹sçºçŸOf ®1xâ‰c–ÝÙ~ÇÝó²CÒµ€`Øb¿ële~F߬žíêÖ+x O€j݃Óf*Në‰MëBìû![÷bËãOeÛÊ¬Ž™i€íà~ ;ø‘W›Q€íàF©àP¶˜£“°š[C֞蘴zÉ¢ìXû®«sÊ[N¨ϱ^¬¬|U· ÀvhKl¤€l1G'íG{åUVi⪫®êŽ9æ÷ÄO¸^x¡QéüóÏw“'OÙ 7Ú¸'¨n<ô(s¬ °,[ìw€tò³ ÀòL)vÿT¥W/6ß´#s°l¤©de`#`;ÿ¡6€=ýôÓÝ¡‡êÖYg÷å/¹Q+ ~æ™gÜ'>ñ‰1Li¦d͘\Õˆr:·§N5`X¶Ø}ÀF: ùÙØ!™k¡ÓçTÚñ,“)•šLÉÆ¤Öý—1°ùO„næ`‹9:í"°_ýêWÝí·ßæ·Ï>û¸í¶ÛÎýèG?r¿ýío•zè¡VûF- ÕÌÉŒíܺñP/Z& À°Åîe¶2/€`K¿`Xöåñ¯J|r`‹9:1;þ|wçwºóÎ;Ïm¾ùæîŸþéŸÜ£>ê~÷»ß5*]ýõnÊ”)¯L>•Ì ¬™”‹ù;·¡*5`X¶Ø= ÀVæ*°li€­`úÓŸºË/¿<3UÑüìg?ë¾öµ¯õ¤¶Ó€leŽR°Å"{×]w¹… ºý×uk®¹¦;þøã[Ëéüþ÷¿oT:í´ÓÜjÞò?šQY3+W U”Õ¹ÅjÀ°l±û €-å>¤À°¥}¶f€=øàƒ[cÞ,éøßœUBìÖ[oívÛm·Jˬ²}EË`+{p”*€-æè”Ø{î¹§‘ýð‡?ì^ÿú×»ï|ç;îÅ_lTzòÉ'ÝÌ™3ÇŒÝÿ€ƒÜý¤41Ù˜ÌÃZég–®ÏŒb6Ýz° Ø[o½Õ}ìck¥SN9e еhíøÃÑ|:Æ\u»ûå^pÁ}Ðl¥?ô… `;(Ø,Ä6‰“µ.ÄŠÀÀÞwß}îþûïwsæÌq›l²‰;äCÜ/ùËFA¬ ú²Ë.sm´Ñ(ÈNž¼Ž›sá%8}ðÆ€m°7ß|³‹Igœq†K~£òª¼¢ùËsøá‡»7Þ8ºMEó«ME9î¸ãœzkô£Ãlm` »uÀöÁs§Šû¿W«~ï3«²è~]F§]VÀªè¬ºÿúÝŽ}@Õ~Û§®ÂÖÙÏ£íJµ‚YåÓ¶÷¼ç=­ÿ«î¶ÜÍ1[Õ]S®¶7û³ŸýÌ=øàƒîóŸÿ¼[k­µÜ©§žêþøÇ?6.|òÉn„ £ »ë[vw·-XÔ×ÎjÚ&—ÀfßÓoO–Úq§£Òö;ìè6J`16ÿÔ7oŸ¼ô‰Ï¯r‹³ívSÝ&oxct›¦$ãÙ7Þx“èüjS™cN9ýŒ¾þM`Ëù5À°eMmFràœ¼ƒ§UÙm`ÐVš–Ú&è4@4U´FÛô7̬«•¡cú) ÀæÝZÝÝÀv`™}ÝÊ+;­kX؇~Ø)R;cÆŒVDöÊ+¯tÿñÿѨôøã»~ðƒ£«·¤‡vËî4Ô©`;¿§›ü‚‚¶U}Øîú–À6ôYSõoQ¯"°lœYXEDÓÆÆZ´ÕXu/ö#ž\?âꬺ낫ÞnFI»Yv—vz¢‹Þ¼ðÉQ€íÜÊêB¬ñ®;&Ëèì¶Â n÷e–q'œpB*Àêþzì±Çœ–µÙyçÝ{ìá´VëŸþô§F%ÙÝi§ÆŒeÙÎí§j'€mÞ5©úS^µ×€m´«À°e tjràqy°mVPš°ávå 6Ìã¬À€Í3Mö·S€íÜ vÞ¼yîð$b¹ök^ã®K^0%7©{ßrËå¬"O<ñDk|ìúë¯ï>ùÉOºgŸ}ÖýùÏnTúÖ·¾5ºìÎøñã‰Ä6̹`;¿§ÄáÒ€m´ŸÀ6ìÓ­ßÇ.D`£ €-°F`Ø(Û$SØÎµ`·L–Ê9*»—Fàµ(Àþâ¿hõ¨8öØc[ «.ÈùË_“þû¿ÿ»Ú#·pÑ#}=þ­[ã^• Àv~O÷êÚQoo®Ûh7 €`»j l€µ®¾áÌÃYc`ýñ¬yc`Ãý:–1°]µõ*€íÜa ö€w½Ë¾ÔR­È«¥"XìâÅ‹ÝSO=å4ƒñAä¦Nê~ò“Ÿ¸ÿüÏÿìYú¯ÿú/÷?ÿó?î÷¿ÿ}ë7€íÜvº,l3¯K7®5eVs­ØF»5,ÛU`Û¬ ÒfÖ_u 'cR›uXû,¶µ›…XùäHúåúCusüjewi lW} `;w€Òº¿nÅÝs>À&c`?üá·k]ˆ}€Õ;Ï<óŒ»úê«[¿ûí·Ÿ{òÉ'k…Ø—^z©®Š¼Þ}÷Ý­¶|ãß`êT°ÝÓs¯ºÞ}ê3'Ы ¡öÝ h`íÕÌZuÕI/m¾Õ›^$ ¶t!îá:°š8ÃêCž"°‚K-¥“¶ÔU>[n'\VÇ…Çj)•«”¶¾l Ù­2ØÞ>8†`å¬vÚ6m§yd«±E`³ï\~y·ó[´fög!¶IœÚì¯~õ+÷ÜsϹ³Î:Ë­½öڭ߀_|Ñ .»•þú׿º¿ýíoîÿþïÿœ úG?ú‘ÓÄTlg€Ô Ü/€íìú^×[ƒØœ1ðçXä>`+óC&&%å.[R°6•© ið5ت mT’làœ6‰S·À±)å°•Ü¥ v€¼n¿ãÎAlÀª»ïW_Ý=<…½7ù+ˆÕ¤N›'ëªrÀ­e²´ŒN€}þùçÝÓO?í>ñ‰O¸ 7ÜÐ]vÙeN Yu2pUwa½D»ùæ›Ø>‰H°ý°·/¼¿˜ú[Äbóek#li"Û§îÀ¾­)ïÛß±·óÛ¨6ù0luøÇ…åêø^ÙpÝõ°•ù!leRRP] °lÀØ´:º°3ßY—ñ÷s=ì+ÎnYˆm°çwžÛf…Ƭû±Cu;­´’KúY¹£˜ýtÒ¸,À b-ýÛ¿ý››‹º2¶3hM9mÄ $}˜| õWy Nmâ'Ál8†V)ˆ´zôÖ8[A®_¯žV¶Õmmñ!Ö@Tu¨m:^ÿûõ§AºÎS`kõ ‚ëÉ^ÕÀVæÅ°•IIA( 6W À–Š `/Hœ7î[‰‘ÉŽªH*«ÊòªhS#ËXn¹ån9ò£G·Òõî¬s.p›m¾…ûáo‹vÞÚ¬ºö¾eÊ·Úßý;á„Fg!ž3gŽÛ"[EaÕ•ø(ýíoë¤õYMy“M6q?øÁZÕ¬$pÕGãg5³± ºöâK¯tÇî Ñ׫WNr]õí f î~‚Ì0) £žKD[ ê÷Ô®yXÎ}/n;¾5« ±Êñ£±*?Œ¦Zû-Úk­6 ‡›vNuÙkê`ó\üèýl´Td¬AI¹“аli€Ý5y°®õòÀÿ›+J*«ÊòªjWãÊYj©¥~³Á†·&2"½¬Á¤ÕVsÛï°s4åìÅ_ì–IÖ…õVãKwK^Þ\1…=þ“Ÿ, °›•î¸ã·ë®»º·½ímîç?ÿùˆµˆ«Àö׿þµÓR=še¸ŸVÑr9kO^§ ºnMp„›Ð¶s€•M CPÕõµ®»Õ´hªQõÐ"´El# `³ 6Üž5‹²Ñaí楃ØÊP€­LJ ª@¶iÝn©=]êB<+1|%>9 Ð…øg÷ÑÅÏ·@èÄ“N-Cy{Ï=÷¸Cÿå_Ü\0fØË/¿¼…ÕÙM_÷ºB«±®~z÷ž{º+®¸bÌ6íÿîw¿Ûš­ø¸ãŽku5VWa¥?ÿùÏ-xÕò<ý°O=÷b+B¾ë[v]ƒV À.9£ªò"ÀDÞW44¸Sd3Œ¤À*â*pô“?c°UWÜ¢ÝqØÎ^Bµg¶2W €­LJ ª@(€šT”ªù$aªi·$ŽÉ­k­U*ª7H€7èçÀVsÏ”-€}ÙQºmÁ"·õ”mÇtû‹u‚bV“"iÖàû5ÆÔÖštõM~ïÜÞ Èj=guV4tñâÅ­É”—Ï<óL 4µ¬&qx†iÇÍ6s›'ã_Óö© M 5mÚ´VwaëvÜ/«kóþ|ÈMHºZ°†‰À¾âðí ~ü褭—êwÇínkÝ}õ×ïNó»’°êvœÖ¥¹Š.ÄYÑæ˜¶B¶¬±ÄqleRRP D¬ê_Ae­"`]ɃϘ†`ŒŠÊ õ¿º‡YM¾²Ûn»®¬º+¯¢¹ÚïGWCÈU×aåñ»«.‹[ý>Àª £<þ~†>øàÑ2T^¸OÛçíÊ(×EóÒ…¸¾;9­&¶sÇ¿S€Õ=£et>ºì²îë „¸Ï>£ûøã»6Þ¸§>¸*Ÿ´ÉšQ£û™=÷Üs[?YºóÎ;[ëo³ÿûeb9Ä÷?òdkl¬º}ûÝŒØÎí¸Ÿ€£ÚÚ«n¸lñ{€­Ì`+“’‚*P€Àü 
{>¨…QƒS'€µcüq«!À†ß•Ê/86¶:Už"Ú/@UÒwýõÛeଲ­ «îp¢'ƒX+SëŸ[§0Zæx¶‚[´ƒ"ØÎº*ö±Çs‡Î˜áþ5ÕÓ“ôÁþg÷ù$*{T³!¼Î>餄þüç?w³Ž?¾²Ç.·Ü˜|ŠÂžsÎ9£°j+XMKZºGmx衇ZËüh¦sA´ŽÓo×Í7ßì~ô£¹ë®»®5ÑÔ7¾ñÑî¼ =<&Ña °þ´¶'bì¡“¯É<¬|P  À ê, ò|P,°á,Ä*Ë" y«È«¢ªÙpb§´1°á6u-¶®Ï6£²Ej­<‹À–ϪŽ`{{“°;\UìwÜÑŠ¾¾˜Ü÷–Yf̤M‚XE^ ^ýHª@v³¤{pâ5ŒB¬Ea-Ÿ¬¢ºi €íÜ€4ÄúÓØÞú!ÔŽ]R `Õm`fU èå2:‚>E*˜iKÚt °6V6`ˆê:l ê±X‹®úKÙÿ~6ì]˜Æ–ÀVu×”+€íÜáŠXE/ï½÷^§ ›áüÙÏ~æ|ðA§ett¯(ú©ñ®ï¿+{r0ÆUðzú¿˜=UDuû7¾q À x…ýæ7¿Ù:Fp¬ß¶Å‹§&¶s;^ÐèO`Ëù… W `§UÙm ×ÛnýÖxÌš…ØŸ *` •×À:kâ°]íº ° ¿åjl۹Õ°ÇÌœéVL"ªÿïÿý¿\€7ož›œ¬õv¼fEOµ=XEc7J¢¹gΞÝ:îöÛooý†T³R?v!Ö2HZËW³Û2¢á0Ú[£ÓAU(PŸ¬M´q¤¹—i4)º©±d6{±º+ªªïykù”Weøãgc Ú–ÿñËQY~Ty€»OO´–áòÉQ€íÜaÍØóÎ;ϽaÍ5ÝQãÇ»½“t '䬢´®²ÊèÄLмžö…/¸'Ÿ|²m2€Õ„NZSVßüãsÛm·µ~CÔÝ8+õ ÀjYO:Õiæaý¦*1 qçv<ŒàÂ9c7²W R©ÉY—wfCZ÷\ûN˜dPk@)Ÿœ,ù<€µ®Ã~]þ,ÄáMiPë×oåø]‘`óì•ý# °;p!Àª»ð{öØÃm“táÕXÔÖøÕd’¥<€U·âØ|óÑ®Àþ’:Y]m»€u¿¥—nÁïgœ±D7a›DNÚ.5u§ko˜ç9ì·Á†§® ÀvnÇÀ« °¸D(0¼ À¶ë>ìGbÙT”Óïæëï·r4a’à2œˆÉ¢°iÇøeêØ°MŠÈÆlS9ªWíd‡Çh_Z»bǯV‘¯Kc`‡÷N,xælçNk°ëMšäN_j©±Ý€#öÀ½ör_K"µÖ}XÑTÍ0¬q²íºþjß¾{îé>òþ÷»E‹¥æýÉO~ÒŠÀ>úè£mSSÖ_"gÒ¤ÕR¡Õ¢¯D`;·áaÎÛ![Ði ; ˜°U@e<2f=Úvz°½ý%`;wàB€=à]ï* °Ÿ8ôP÷É$bkðzH2v~°Zçõ3Ççžx≎’ÆÖ†½6ÒîË^¬ÀõÈ™Ç䫯üÿr7j?i}\à¬ó{ ‡CC"°½õC¨z©›D;×â°½¼mÇ`;wÐB€½æškÜÚ €¾ä-ksHY=ô_þ%u ì¿'-½s„Qx=)9vêV[µfVäM_÷º¶cWÛkµ}·ÜrK `¨y© ëÀjmYsÝzʶQ0KâWìXθðÕù½†Ã¡!Û[?„ÚQ — °l)€`{yÛ°U8¨i“8…QX­íºo¤û¿ímnáÂ…£Ëè|÷»ßu;­´Ò(ì^œŒcÝ{ç[Kì¬@­º d¿þõ¯w±°*7/5`ýërÿ#O¶fÞs¯wºe–ÏØdÒ™vv ÀtUñÛE/Û [™21)iNe¥Q Ô  À°5ÜhUWA¶sg7 `¯½öÚ%¢°êüïI$vÓ5ÖpW_}µ»á†Ü”d¼¬ UûÔexû6r<ð@k¢¥ì·_kMX‹ÂæMÀÔnÿÍ7ßÜŠÀj¢¨¼Ô4€õ쬱±D`‰Àcÿ– «†ležÅºII‹++‚P X€­áF«º ¶s§/k÷NŸ>:ök ˆî›t ¬>œ¤Í“èêÖI×`›¥8yâ»ÍV[ÍÝ}÷Ý£³_yå•n‹‘q±Û®¸¢»ä’KÚÎ °‚Ó˜¤‰âîºë.7þüÖÒ[`Í®|Ýu×µàûßøÆh$TÝ}{åøÎ½êúÖìÄûpPÏÚЫsϪ—lç÷tÓ®)íéî5`+ó,ØÊ¤¤ º`X¶®»­ÂzØÎ£,€ìÙXX-£³G²´ŽºîÕ¯vêR,UäUÿoóÚ×¶à0œ%øï×[¯™½"I»o·]î,ÂY³ ßtÓM­¬f)ŽIý°8öKÚ/Ûù=] —†leN[™”T—,Û$€‘þ̺Œ¿Ÿë`;wÔ²VÌ÷¾ûÝ­(¬­«m9ø`7yùåÝw(ÕDOïLÖn=çk_K½fÍšå4”@WÑØ+®¸¢Ô}öãÿ¸°÷Þ{oT`;·‹^AÛ¿×®W63ìõ°•y1leRRP ˆMgç•À°¥ë.Mâ4+1X%>9 °;»íVë,+ ûOÉr8'œpÂè,ÄšTéÉdM«%púéd­g­õ𖼓’è­@7™ÃýcÅÍÊÛnû7ÞØXilê‡.ÄÃîx§?Ûù=] —†le®[™”T fåN*À°lw[ÝE°;j°Ÿþô§Ýí·ßÞ7zçw¶ÆjÆá÷ÙÇÿÿþ¿1«‰”’úÀZÑvKÛì³ûî-xÄ*rû“Ÿü$w)œ°<_ÀjŒml`;·^€۟׭¶BÌB\±ÏÀV,(Åu¤Ëú®Å×wÕŒlG7gǰ;»;íò£í¸ãŽnîܹcV³ /“LâäG`°yËÙØþo}ë[n§‘Éœ4+ñû÷Ý7úX+CmÀ ¨cÛ¹môØþ¼n½°ê`;v"ÆÐ-€UŒ4øÈ~ªüDìÔ¤Ædž‘j>7'é–$ÚpëZk•ŠêÅÂùº¦±Ú°ÕÜ3eK`;wvµNéÞÓ÷…ØW%ù¾÷½ÏÝvÛm-X¼çž{Ü'fÎt\pÁhâ"ÛZ6Y+V3+ ;)鎬¯ÀoÝi§ÖÌÀYPl+(-’š> 18“8aÿ† »†t!.ëA,q\7v檫Nzió­Þô"i°5XúÕ¯þKe–ørAQ«Œã«ª€í=XÆh§ùتîšrå°Õ9ZÒe“7¼qd'$3ŸtÒI-€Uwau¾ï¾ûZËØä­ÅîÿØ‘Gºc“Ù‹5™ÓQ À{ôÑcʘºÉ&nƒUWmÕ•Vöõ×_ߊÀ |‹$¶:û¨ ˆÀöß5«Ë6¨'Ý6ØrþCÊQÝØYº>Øîàÿ®%ö$ÿ©ÊO4ÀVV) ÀvhL³’ã•øä(ÀVÿPøÒig¸‰IÄtäÇØ½á opçŸþ€}à\‘¤uX'ÌF¬µd7œ8Ñ]tÑE£e`·L"¿Ç~üã©åÀÞqÇ®hjò:°85D`±êÆMS¶2W €ý=÷cÙߨŸ¬¸¢»ûˆ#H¬º‰ß’8äÉK‹*€|°ÝyØ<ºøywÈaGŒB¬~˜÷Þ{o§ål-“vÞb wÝÈÚ±êN¼cÉÇuT«¬í6Þ¸µ,ÏÉ6M$–¯ÙÕ¾¢ €íŽ”}ÈçG¶¿®WÞõd÷¯'é0äg`ØÒÑòAØ©‚Òðhìqù¿Ñ9ØH©Øî:G·-XävÚe×Q]6éþûÑ~´5U݉‹¤Ù³g»m’%yîX‡=2ù¾ÛÖ[»7Nž¬—@N“<½ïÿq‰r¯»îºÀ FË$E€5“±Ê¹úê«[ãm-¼pÑ#¥X8çÕÛ[½¦Øé`k ÀF: ùÙX¶´?0«{$×ÙIº™4\‘8ß›äÿ6Fç`#¥`ëqÌ.¾ôJ·Îº¯…¾I“&9é¢E‹ %óƤ×Â;“åt dç.½´›¤ù#“d_‰Êj’§d:x÷Öm¶SÞµ×^ÛØyóæ•Jl}vkOYùØìkõáÃŽt;î´sTÚ~‡ÝvÛMÊ«2‹æ/sÌÔ7oï¦l³]t›ŠæW›ÊóË®,í´vjïUÀF: ùÙX¶ôoÁÀD`óïr Û© °õƒ‰–Ý9ð}3>ö½ï}o+2Zb•×@vß$êª.ÅX¥Í“íw¿ûÝÑò `¢e]ˆë·•2Î9›}ä ÅÚÿg$“±%“¥u+¿Ê-ZÇá‡î6NƻǶ©h~•[ô˜ãŽ;ÎíÀA¥Ö26^õ1l§žÄèñ“ÿ’u•~˜…xH €­ô¾¡°>U€lä…`{%×Þ0Ïm±å›FAv¹å–sG'Ëãh)œ¢i«õÖk5€U÷bu5¶r4nUXM"U6°½³•";Û`]äGcÅWM–¦Šýͯr‹£!S¦L‰mRk˜B‘ü*¸è1sæÌ`#Ÿ·d+¥ À–2œä Iš“w°º ø @`#¯Û{(9ëœ ÜÊ+¯2 ²ë¯¿¾;ûì³ A¬f!öV «q²§vZ«Øo¼Ñ•Mlïm%dXÖ·(Œ°‘N²Õ© À–µ·(€–”^u·² æ8`#m€m”h|¬Àcéd2¦äÒµÒÎ;ï쮺ê*w÷Ýwç¦m7Úh Àž—ÌF¼Á„ îÌd¬¬ŽW9ŠÀ B;I6 ñ¾ðf!n¨SÀ°l±ßuºG: ½ÉÀ6ôYóBµHžß§J+`«T“²jQ`zR‹ —OŽl1G§Èq™¼Z’f·í5 ‡¯J@tÆŒ­µ[.\˜™ `Õux›d<컦MsZûÕŽ1€½þúë]'é²Ë.s{î¹ç+³)¯¶š|—9WŽéŽí°,[ìÞ`í*°lYš˜»D'زòr ôP¶˜£Stͽêz·ÉÞ8 Š’hê¿øÅÖú±iI»G½}ÚkºóÎ;o‰Cï{xñ(ܦ=ß´_Û-Ÿ}÷ó¦¬åï0wû9 ÀV|çP ²l1G§Û?àEËW·Þ}÷;`ÌøØýöÛ¯µTŽ–Þ±¤åo° 
ë§9sæ¸vÚiôøñÉ8×#gã4î¶h[Èß [`X¶Ø½ÀVæå°#ãUÛ¬"³ên¬<ÖíX]‚í*ðÕ¾œ1¦K²ŸGðv!¶r­L•ÑOÏe¶²û‚P`ð`‹9:M}¨›ï[¾iD—[n9wÔQG¹Ûn»­•.½ôÒÀj,¬Ò÷¾÷='Ðõǹîú–ÝYgf{`X¶Øï:[™¯Àæ¬àT€éGO T}€Ìi»m ó„«¥÷tŒ¹Ú¦ú,ií`+»)¨˜‘´}f·¿¶¦°Å¦? Î:ç‚d ë*£ ûú׿ÞqÆ£«Hì'?ùI·ZÒExäaá6Ühcwñ¥WöÕƒ®éס—í`X¶Øï:[™ËÀæ¬àÕSë&¬íŸ¹þsD]‚ýˆk°*³ß"®ás² ;-)svžu+ÓÍy™Ø5)0+©G‰OŽl1G§—`[·ºÿªðÒK/ýÊdL[oí&Mšä6Ûl³Ñm&Ll£-—|ýa+, À»WØÊ\%6`ÓÆÆ `m{Àêù«<'Ÿ2»õÌVð À.aÇ fåN*ÀVvÿSP °‘"°Å~‚¸…‹q{¼m¯1ãcGÞpº÷àC,‹3Ý…Óì€`Øb¿ël¤ÃŸ €í` NÛ¬º °ù†8’€–ŠŒMQ€¼l1G§ŸÖÚªÞë×[¿²[OÙÖÝ8oQ×…W]s€`‹ý®°‘C~66`%}û;öó >ÿ¢K[Ïg›98mœ¬­ûjy¬MàÔ>еyä{¾•ÅçˆØ­’ò®ˆ/“œ(ÐUØHyØbŽN??hûp\k€`‹Ýël¤ÃŸ €ÍXƒUY„ LÃq±XÁœ‹Õ1úßß`µ|Ž•ã—ÛOËéô `óÍš(PŸl¤Öl1GD¯¦Û À°Å~§ØH‡!?;°Të>3¤¶„NÚr;ÜZ¾pâ'©öù€ªÞV~¹þŒÄMn©}lþ FŽÁW€¼Æl1G§´q¸¯) À°Å~ØH‡!?[§ûõ¤Š·ÕÌÒõ¦çZÖØA׀ͿÁÈ1ø °‘×€-æè ú„óë{`X¶Ø} ÀF: ùÙ:ØñIŽèC,;Às6ø>›ƒ‘cð`#¯1[ÌÑðЫé6À°l±ß)6ÒaÈÏÖ)Àª†b‡`Õõ8œè©éÏ*ÚÀæß`ä|ØÈk Àstªø‘¦ 4ï¦ °,[ì7€tò³U°!ÄÀvóùÐ䲨üŒƒ¯yØbŽN“üi×’etÚÛ€¤ØÏ­·ÞêV]uÕØì®h~\ô˜Ù³g»)S¦D·©h~\ô˜9sæ¸ý8¨¯Ç(°‘C~¶ªÖ‡ØoÛØa}–°ù79_6ò°@ϰ>,õ¼åì­=y·ýŽ;tzÃ7+|~l>ûØ/ùËn5Ö,|-šdŸº_ÛXœ¤›Iipk¢ß‹j8/)ë¯ïØûÝ}ý‚dPŸ5UŸ鸓m `#//;øûÄÓ/´Ö›«úaÓIyMkO'çÒ´c]ü¼Ór ƒžæ\ø½ÂçÀV°ŠÀîú–Ý _‹&Ùç~ÿ| ö[IšFj”º&OñÑ£õüjÚoþ ´§W«n‚>(ÐØÈ«À>À6mJþpöAyørÍ¿—Øî,]ˆ#¸d‹U`b’Q³·|9º7ÿ·µŠç_¯Vo­Ôõ‚ 4A6ò*°Íz0Ü÷ðâÖìƒú[ÅAe°ÍºÆU]WÊ)~]X6í¾a l¤ÃPO¶M’jæ'iúHu,Ë蔵¼Ésò`ób è‡O†Ë'G¶¸ÜMpèFt€mÖ5î¦ýP6“8ùˆZt<+“8á2ôX­ÿ*xÄÚ€`ËšåÔäÀãò`ób? 4Pºöü‹.u8ÃݾðþV„q»©Û·"ƒ¡Ã­õÏ´OIùÃH¤ŽU:Vyì»Eí¸4Gþ#GÌ-[õ„y/Ú¬^Õ–¡1]vnú{æYç)+OíW¹‰™Œ¶SÛÔ;Vm ÏQI+Ó-lWY€Uv>víL›´óó¯…òùzøÇ¤k¿µ;íZK?_Oå÷¯®§ÎÕtÓù‡šûvaúè¯éªý²'k‡êó÷„ƒÿº·b?Eg.šŸYˆ›c[D`ãœìŸ´dbКZVÏ=#Ö[ƒÑ¤g Ï‚î߯ú}î…%°½P:Q CêXƒ(=ô0PÒ•ÿ`P„ Ä"zøog0nÓ÷ppƒ]Á“Ê·vXÙUiPíC›Ž4hÛdD*ÓÎGÛìüüòò4Ð9ûº(¿àÊïlí–N_Ú¦ï:^í Ï¿ ÀšF*Oç£òU®½P°2í» i¥¶H»žþ~ÓÚÚmeù×Zyt ³êW=>ší¨N@ €ýënZùúIwåQ9a8.Ýw\êÒ€ÍÇ÷¢Q[–Ñéð!Ìáy Ô°þsKÏ%óE˜x°ûÏ6ïV`?  À¨u¬ÿ4b²œWåõ¡Iù zBPòAØàËòÔùõ„y  Ã6i»~\Ã( å XÛÃó3Pk§AVâ,°2@óÛ+€S[Cø- :ŸæÃmöR@šhŸ¯Ú¾D0صsô#Åjsx­Ãöj¿aµ·ä~¾0:­c|½­ÍþK‰°!ŒѼÝwr:Ñ€`Óì‡l£¢ÚÖ^ŠÆüÆèy§gKšÏ gõæ2nçç„p¬2ÃÞ`*3ì‘fu[[ü:Ò¶¥íO›oÃÊÕ¾:Á€mô}HãP Y Ô °þhVdÐ~ð-ªéÿ€†•|¾aäTõY²Hg°†›vQÌ,è ·§•nk°i°Ÿšú E#°ëÒÈ×,ŒìÚÛê4xÏj—Ú–uÍôðJ{›=¤t¡ãGdÓW럧¾·{±ãܧ™ À°l³|ˆÖÔ°öÜK†ÚŒ=#ì%ªþú¾„¾Û%늬m>”ÚK[+Ñš6Ä*|¶Y4ëeõXï%¿Þ°ý~»Ã €ùöâßzÕñL`#î²  ¼¬@“ÖÿA·ëjʬýXûbÿÇþ(À`†+ Âñ¡ººFáC¿S€Õ›_ÿ!kcƒý¨pÚÛòp›9ÖÅ8Œ”‡¶‘6F:ÖVÈ×LxÕu`X¶«ÞÏĤôÜY_ ¶ €µæy/µ­×“Á¨þ†={ìÙeÏ{©>¿üžc~$4`õ{fÉÔƒV+×êõŸgaï(Ëã?휬gÝzÆ°ï ²£À0+Ð$€µ·–ö㘩+°Ut Më†líÌê^vSîV6œ`BBédŸ¢Øðø¬‡•½aNÓ7-*Û×:«Ì"kç!è6@ »Kµkg·Ò”Û;À`X¶«ϺIé‹+®¡€yNÚ3%|akÏz{¾Ä¼\ {ùv °aO¡¬zýçfØ+Iõ†¾W¯ž‹lÅwÅ¡À +Ð$€ 4UD`íÍjø61„ýÈg½q´—ÿ YD2|›öV6`}èŠénN®¤cl›ó`NëbŽ ÇÏ$Û›_¤­ þ[jÿ­px]C€ Ä6ζÀú]¡u|ÚX¤°vŽy³Q¡½ƒÐN´`X¶«M߬ùí"°±Ã…bVeY¤6œ…¿[kí·«ÿ×Þ·ëAÕÉïoÞ±lWïM GÁR IkãRíÇߺv:Ö‡:+;Ïi?ìáÄEþnØ>?ÊiÝp´Íïúš¡ipéo]ÚAh8žEmð5+°áùXöpß[„ÚŸü!l—= cÆÀ¦u'/3Ö«”u]®ÊuŽùÉ{ ³¿ZP`X¶«>LßlÌØ<€õŸz®„QÕp›öë»=ƒ,·6ob&¶«÷…£@[f&{•øä(PÀ*¢þh¦m³5^íA£ïat,íÇ8m›ÊÔÖf ™¼uåW:>+R«íÆ´²b5P=*ǯ#íX¿ýª/+‚œwl;HòËõ#ÖYš‡×Æ–ö ßjÇ\3ÕçkĮ̂ïa¹þ6ëÖå·É¢ÅáWµGÛtLÞ( ²Z¨¬SO€`»êõ-ÀZWÚ4ÈÔsÞ#ip®8 Ÿß~¯¤´ãíåx»žU±]ˆÛ½¨Wù°Ó]gçYü´$ÃÍy™Ø5)0+©G‰OC¶N'•ºú0ª¼vyöÖ"ËuONQåyQVyû`X¶«nQ_¬=,ªgˆÁ©½tµïö¢Zõ».ñ–Uï Úû±îÌö×ÀÔ/3«÷VZ½>°ZûÕ^V£mþ‹ÛØɵÉT €íêo…T€¬®,Nwy§»Jílr$ÿèÿo3VYg¯ÊJ›É¸W“SôJê{ß°,é”ËÖ×k½¬ì9iCÂÙ÷„þГp¿ŽŸ¥á6¿ ÁlØóÇ@Sõ¨|Á¬_¦¾‡óT蘴zÃGaûÃcTn/fâ×ïs9³Ë< €­XPŠë¾l¤Æl3À².аÅÍíÍkøw»Ïúç\—ÎÔÓÌû €`ØHç \¶¾X~»{÷ÛÝ+€Ý*©øŠröÎQ(P¹l¤¤lï~¬yP¢=6P¯ °,é”ËÀ&cHù]/§A¯¶œ©s tG6RW¶Ü-(tÃúÏX€tÊe`ØÒÀ–»é8j°`#¯'ÛN8àÄ5ÃÊÙ À°‘ÎA¹l, À–»w8 Z °‘†À–s„tÃúÏX€tÊe`X¶Ü½ÃQ(À±¶ÿœpÀ‰k† ”³€`‹x…ó°,[ø¶áxE"°‘ÖÀ¶w„5-½¿¾àÒ.oÁtôŒ×­:× €`ØHç \6€`ËÝ;…D`‹Ø›íkQ­7ˆKË”…!­I®=—U–i—~eëâ¸Î YvÊ”)ùÔêå˜={¶+rÌœ9sÜþÔ×÷ù1ǯõ'õâ›Og °léß½`ìÌü–8zF²eNÅeR tU"°‘ò°Ù …¼C¼Å\/à¡Ûí“&E8—~D°Ñ^Ü iuN˜0QRtZj©WEçU¹Eó—=¦È9Ô‘÷ÃŽ,í´6Á6ØH‡!?ÛÄ$ËÍù٠嘥ëÓ;¡ Ý}–°…î 2¨lä…`³mT7Xÿ¡¥mJM}5­}8£ÑÀßÔëH»ºë(¡/úú6ÀF: ½ÉÀIT·W«n‚>(ÐØÈ«Àf;rŠª¬9:únÑ ýoÑYAc8ÖÓºÏ>ñô ­.ÈÊ{ÃMw´à׎ #“áþ´H¯êñÛä;aYí³<‚I«[Ã6[U¯å 
>-Âë—«óó»\§E±àÀšdl¤ÃЛl,[Öòf$æv!žÖ…neÌq(ÀFÚ›îH &«úkŽ–¦ Mÿ¨ ò®Ƃ:‹ÔZYÖVß ú4ÎVåÛx[F}Ç›Z9Y#eµOå[dÖÚ,°ÔùùmЪòUVZž0ÂkçqþE—¶ÎAÇúí³63Ž`i°Ðì‘l¤“Ðûl,[Ö زÊq\ϘžÔ¬Ä'G6`Ó& Jë¢+8 †ÑZƒCƒ8A^15ØÓß°¾´I| NsÂÓÚ§rÔ¾"Mõ»C«þ¼.Óaùyc\Ó^À6€ 4ɈÀ6ÚU`ز:59ðмƒ‰Àæ)Ä~h lº#™”~$3t¾vŠFj{žYçc]‹ íoÎsúÒ6ë\ÂíiÝŠÃ<~ù1p“'ïœØì`Ø@7m€m sòJ“X¶« ÀvU^ Gî(À¦;†YKÀdM’äõ ljƬuMV~?qÜXý"öB^ìxɰÝñ/**uÖö;îܺF¤ÁÖ@/ì+²™BŰ…ä"3 4C6~ l»¬ö)‚i ëw'ŽéBœ6޶Œc™°êʬ¶Ùx[+·Ó.Ä6n·Ý29ŒÊØ1Ç`7uÚÛ $£[%Ûg‘†Bƒ™½°D¶ªS' t¨? ±*MÍ.œ„þìÄþäOz³èÏjNâdãTý5VU¾?nÖ&vj·kVûT·€ÕÚl4ùåÛ,ÆV¾ÀTíö5dƒ`¿\ëJ­ógb@¤NÑ Ý›uÖÙouå£ï·ó©¢½l‡Ž‡£@+ÀöñÅ£éëÛ`ÃIlIœ4P5õNΕE! .³–ѱH©¿Ô_¿ÕÝ`³Ú.Ñ“6ù’¶ÙìɱËè¨>i ýïGŸýÙ˜«p4) ngiã¸ÍfôÒ(œ.ö“ž~ÿ°•ù@“’r—-©¬6 B `+‘"P nØl h×­7Œ¾ÊL›ØX‹|äE@Tv§ËΤµOmiW¶ïü­_å¦Õiݪclò¨Ø@V¿ዯNÎ)L¸ª^%;nÚÛ‹.BH•å_ŸÒ šU—’-Ç•vM `m¼»µWÇøùóÚjZ©>•) Ú‚o+SíÒÿeÇëÖi“le®[™”TÓ“2fç•Àæ)Äþ:˜•T¦Ä€eFÒ!Y½N‡—ºš ýwQ>h¦Íˆmk×3Œš†ûiy=2Òº[Ýþ Éúßïõ`pÎDöŒǪ ¨ý‰ÓÚÙfZL•éÏ$®ïíÚêÖIâ¡u=Ø¡½ô<ñI«r'`y톶Qlä¥'ÛLG@âº`ÅmÀº­êo%µH¥Ô’"‡@릂™Ž » ç]—4€ÍëoÏŸvÃõáÐ@=¯M~”WçëkÖÓV‹ÀJŸ4x`#Àƒ— €¼kÚÏgÀöóÕÒ¶°‘€-î$Ç:‹äC[l ^°®±ú޽´.»>ÀÚÿa]¬AZltÓâfuÉ í!`ý±´MX‡ô±nÚ:¬.ù¼lìà]Ó~>£(€Ý*9Ã+úù,iû@)ÀF^N¶^ Aol {6`à$U¤Ñ»&±òÙR5áøÓ˜k—ÁTy!Ôª,´³"°! úßÓºEÇt!ni«_‡u…ö#Õläxð²°ƒwMûùŒ¢¶ŸO¶žlä5`»çLÇ8»äAl :H›ÄÉÆvÔ†Õp^[þ)-Škc`ýñ¡áõKå4 Ûc+еò¨q¿Íþ9Z”Ø—+h7p×__$‡ÛbÚÎŒŽNÓ®ÉvÎ$N‘C~66_#rÔ§[ŸÖÔT‘l¤luÎs“4ÚÆu#a4Ôïòj]ŠÓ–ÎQ9ÇÔòf!–Æ‚F›V·¿ÝŸÝXÇùkcy•?ƒž£ãµú´ß€7m¦c•Ù.«¶¨ÎvmµÈ¯_§¯•ߦ4}›f‹l¤ÃŸ €Í׈õ)ÀÖ§55U¤)$ Ø4Í™¤=ØdY˜…³ç†“9 ´e´%`ÒêÊêFk ™×>«#`}W½6ÆÖ/Ç„Z¤7míèðU†•›]ËH[(m[»¶¶«ÓÎÉ–*J[c6O¿º÷°‘C~66_#rÔ§[ŸÖÔT‘l¤,°P·³H}Ø\Óm \º¦Žöf­£îa¯€tò³°ù‘£>Øú´¦¦Š`#…`‰aw^9îßl)º#‡lïì€tò³°ù‘£>Øú´¦¦Š`#…`{ç4Nh 4ÏÔ9­ën×*­+oõ{l¤ÃŸ €Í׈õ)ÀÖ§55U¤)$Û<zØIΛİ:m€tò³ML²ÜœŸ(P‹l-2SI• °‘j°8Šu:ŠÔ…½aØ@Ól€tȆý¥@ÀªÛ€  4A6ò*°8“Ms&i6‰ `uÚé0 úK(€–œÝúëÂrkØÈ« Àâ(Öé(Rö† `M³6Òa  ô—l]/Z›(0}$!FŽ,ÎdÓœIÚƒMbØ@6Àâ*¡À@*059«CóÎŒlžBìG*Àâ(Öé(Rö† `M³¶Î MBš`kšjP JXœÉ¦9“´›Ä°:m€­Ò« ,è/Øþº^´Z °8Šu:ŠÔ…½aØ@Ól€Å!BáU€ÞkÏ™÷±,ÎdÓœIÚƒMbØ@6Àö±CÓQ CØäpè…,ŽbŽ"uaoÃfs¯ºÞí¹×Þîýø»ÿ‘'ݰ?œ/[™÷11)iNe¥Q Ô [ƒÈTU+Àýà`ÒFì´ßlàÆy Zàšüf¦ &:ÁÒSϽÈþ¾96 ÀVæY¬›”´¸²Ò(jP€­Adª@ª`›ãDõ›ƒN{±l`IX¸è7}Ÿ}Ç€ë{ìá6Ø`ƒÑm“V[ÍuÎ@lC €­Ì³`+“’‚êR€­KiêA ` À:·uÞƀë›ßüf÷½ï}Ïýò—¿l¥Ïþón„ £y¶Þf[wí óÙƒ,[™SÀV&%Õ¥[—ÒÔ£ÀÌ$“ŸØÎWœ4Ć׮‡v„?~ü(˜nºé¦î /tO?ýôéÁtÿò/ÿ2t±e|lïl€­ÌU`+“’‚*P`zRÆì¼rØ<…Ø_§³’Ê”ø°D7zÝîz瘣}÷´tñó­ñ¬ךü̶’º ŸsÎ9î™gžÉM·Ür‹û‡ø‡ÑcÀŒíÞõjw/°•¹JleRRP ÌHÊÈT €­@iЍL6RJ"°½q˜ tÇúÓ4ÓñŸûÂp]{íµÝìٳݯ~õ«ÂéÛßþöãc¿tÚ¼x«ñÅé0äg`ó5"G} °õiMM)ÀF Àö§ üpݰúmàÄ“Nuš€É"®“&Mr'œp‚[¼x±{öÙg;J*GåYÙ›m¾¥Ó<\çî_g6ÒaÈÏÀækDŽúˆØ­’ö\Q_›¨ Ú*ÀFÛ}ç±þ¶¯œy¶[{ò:Þr8ÜÑGíž|òI÷ÜsÏU–}ôQwä‘Gºe–Yf´®=÷z§ÓÌÆØP÷l€tò³°ù‘£>¢¶¾æP ä+ÀækÔÊÀvÏ)ÂáD[l ¿m`Î…—8EB-**°<æ˜cœ@óùçŸïZZ¸p¡ÛsÏ=ÇŒ=rfRo2ަØH‡!?›¯9êS€­OkjªH6RH¶zgM±þ¶uÝõÁU,rÈ!îþûïï´¦ñܹsÝf›m6fýXucƾªµ/6ÒaÈÏÀækDŽú`ëÓšš*R€€­Ö±DOl ¿m@‘N‹¸êïþûïïî¾ûn÷ë_ÿºgé”SN3>vÃ6v_z% [ÑDOl¤ÃŸ €Í׈õ)ÀÖ§55U¤)$ÛßÎ6°ÄõêµI“^ž¤iõÕWw?úÑÜo~ó›F¤ÇÜuÔQcÖ›Ýõ-»»Û,d;Y6ÒaÈÏÀækDŽú`ëÓšš*R€€­Öù&Ðèo0€1c†[c5ܧ?ýéÖÒ8/¼ðB#ÒOúS÷ö·¿}L”øÃŽ`|l ÀF: ùÙØ|ÈQŸ½Ø[Æ›¤›IC¡Á7§¾ª>l¤’l;ÛÀ×¨Ö `¿úÕ¯º›o¾Ù}ðƒlìœ9sÜoûÛÆ¤+¯¼ÒM™2Å›y¢c|l9[`#†üll¾Fä¨Oú6™© ¸:Òðh\óã*´i6RL¶œÃ4 60˜6àìí·ßîæÏŸï®½öZ·ûî»»]vÙÅÝzë­îw¿û]cÒ7¿ùM·š·6­–ûÑìÉØg¼}°‘C~¶‰I–ÄãƒP ';Mðú“Wtwqi€5¸u­µZ/*’_ë>ùÉOŽ»ýŽ;»ç-d#º°‘C²%¾átù‡¤¡Ñ`õ Í, `×M*¬ @Cm¬àæ‘G! 
°wl»-[áÝZ´(vðr ‹kŒ ÄÛ@;€Õú¬÷ÜsOk\ìë_ÿzwâ‰'ºßÿþ÷Iò—öÙgŸ1ãcßÿ¹ûym²lQÏ¡žü LLXà%ñih4øz…Ö°Ó’ +ë6À´°Þª%Š`ã[ ­°Á·€ÕDJêJ|ðÁ·@öŠ+®p/¾øbc’fOÞf›mÆŒ=þs_pO=÷" ›²l 硆C47ŠëÂ÷¿ŸÞ˜ÜÓX ¹Þs*4-–(p÷€ºK;=¹”øä(À¾Ctq±xˆØE‹¹ûî»Ïi2¥wÜÑí±Ç­õbÿð‡?4&{î¹cÆÇNf|l*À°Ít•|€}ðÇ?¦7æ÷ÆÔpÑ‘—UìÔIJͳn"°lXÝø.lž½²D6ޱР|( °÷ß¿ûÙÏ~æ¾ño¸õÖ[Ïyä‘îé§Ÿvüã‘~ýë_»ãŽ;nÌøØ­§l뮽aÑØ‘h,ÛL—€í^ð¨›~}™²»°Q† À°¥ÞްQ÷W×2°qùg>ÿ·ãN;G§m·WåÍ_ô˜íwØÑm·]|›ŠæW{Êó•ÿ&NtÄ$3€sÜ}Z…N1{òÉ'»üà­¬ìƒ>è~øa÷©O}ª²gœq†ûÿøÆ¤Çܽ÷½ï3>vú>û2>6¹ÿØ®¹ À°PäÁ, ÀFÞ,MÊÀÆ9Æ“×YÇ}ç;ßi­ “’k•ÏÊ*š_Ç9FÎtr­£ÛT4¿ÚSô˜Ù³g· ¿ è Œ8;F§|ÚìE]ä¶Zw]7áU¯jMà”°Š>hùþçv[mµ•ûáèþô§?5&©m;í´Ó(È.3~| à†y|,Û$¯ä•¶°l– À°lwZÅu°ù­œ~ìâÅ‹]ìGpYäS4¿Ê.rŒ&œYuÕU£›T4¿ .zŒ €³?À³>ÒV“"½wút·ùòË»ùɽý¾å–k °=ö˜SÄóꫯvS§Nuïz×»ÜC=ÔˆPë…Ü:Éïš~G”vH–ÝV;`+v,**€`+2¥¶Å°,[ÇVqlœc Àæ³/gKà ýrÞ!À~ñsŸs믴’;}©¥ôÖ¨•bö‰'žp¿øÅ/ÜÙgŸíÖ_}÷ÙÏ~Öýö·¿uþóŸ‘﵃ ¶_®QÕí`+s,&&%U6  ÀVf™m `X¶Ž;­â:Ø8è`تfÊ‹»÷êÖ)Ø7¬¹¦»n\Ëì“O>é•ýØÇ>æÖ^{mwÁ¸¿üå/=Mýë_ÝÿýßÿµÖµ`—³*~¼cqë&'½¸ª`تl©]9, ÀÖq§U\çD°lÝ E}q÷fÕ:…ûÍo~ÓMIº¼–‰À `Ÿzê©ÖìÄ÷Þ{o«K±ºÿä'?qÿùŸÿYkz饗ÜÿüÏÿ¸ÿþïÿv·Ýv›ÓR;,[‘kÀÂ¥X€Yˆ1œR†SfÊ몎aâŠ%‹`ãœd€­”(/îÞ«[§´1°»n½µ»Â‹Â^œLâ´~2¦\ðÎB¬g£µ.Ä>À>óÌ3îW¿ú•»æškÜÖI¹þð‡[p+°ìvúÛßþæþ÷ÿש]×{Ýu×°ÌB\Ò{H= €…CJq‹á”2œª`´L9]Ø™ÉO«ŸØ8'€`ë)ê‹»7«Ö) `¿÷½ï¹ÍV\ѽäAìÃÉÿ;­°‚Û+™Ñ÷–[nq¶ŒN,À>÷ÜsîùçŸw§žzªÛpà ÝI'äþð‡?8uí­:)Ú*pÕÚ´wÜq‡»é¦›Xoù*ÆÀVæ*°pH)éÀNO,{vžuebcјKš ASæ§Ün»íæ¾öµ¯•º°eÀ°_ŽéÀÎJ V‰[ÉÌš,[5(Q^o5O÷¬et4 ±Mäô¹W¿Ú”t+Ð~'I&KT}ü#i­[`ó›ß¸ŸÿüçnæÌ™n“M6q—]v™û¯ÿú¯J’«"¯>úh ^Õm€k{le® À–âœ.ìŒÄ²s'J€¸jzu²”ˆå«!@j›·Û`yðÁ;¥n×SUùleŽRs¢X6|Øw/5]§,€UwÛMWYÅ=72 ñ®»ìâ6Ÿ0Á)ûb’>¹ì²n—-·,°/¼ð‚ûÝï~ç-ZäöÚk/·Ç{¸x #ˆÕ8WMÒ$@þéOêîºë.Ö‹ºúvÀ–rÒ`ØRüÀÖl8X«sŠÊÔVyEÊ1.rL/ó°•=8JÀÆ9Ý,Ûtð¢}q÷ržNY+<ú¨£ÜQãÇ.£óýïßmºÆî¼dL¬&wzgÒ¥øÌ3ÏŒk]ˆ˜°‚X¥K.¹¤Õ­ø£ýh @IMж \5žVkÑjŒ.ÛÞ6ØRîë1‡ßSÿ‹Ò|ë÷¼ç=}dª‹z °[%–œÌqPÍ'™:{Ú-ÉÃàÖµÖ*Eòu ž°ª[†«Hì)§œ2Úþ° ±¾«»±¢¥‚Nߨ©5M‹¦*¯ŽWÝ ÖmYÛÏÐi‘ິ‰­€­æž)[ çô°lø°?î^jºNíööÛowë'QØ—YÆxâ‰î¾ûîs .tû¼å-î³ ¼Þ›<÷7}ÝëJ¬Öd Ó¿øE7yòdwúé§;i^Ò8W%Á±&’z衇،¨+ز^CÛã†6öÆ4_ÜçùÅòÙ•b}ä²ù´.»@Ë–Qçq½ØJï„~X]t¯Ÿú.à5ƒð!Sùl|¬ [ûô] FT Ž•O7†þZغ4[]utYîÔÀØJoÂ…°qN7 À6¼h_ܽœ§S;€¬žvÚi­Ô°Špj-Õµ“îÄê^¼E²W^ye+úÙnâ0›°Ú¦2=ôÐÖøX]U×à0 ZõÑÚ²šÑXǰñö@¶°ëuÀPìå—_>åŸë·¢óߨ-ª;lS§>{·Ž`Ð…Ø.nžYëƒ"©28?«·':ÖŒPoTÂȪÿ†….Ä­ßÕY#©²_åA-€srX6|Øw/5]§<€¬n’<“}€ýÙÏ~æÞ¿ï¾îô¤+±–ÛÙ#Yãµ(Àªq»¤ ˜vØa7=™LJ€jÑVÝ™ŠÌêX-ÏÀ·C¶2€ 8ÄzKš¯ÎS£ï 6) %ÿÝt‰ ,H•5¬õÕ±´0ë»x˜ ]v؆¬šiëª.´ŒÏòùýéµÍ 3,'4€-ò(`㜀m:xѾ¸{9O§€Õª ,hu!¶u`µ®ëFÉÌÄ «(ìUW]Õ8W?]qÅ-·ëûùçŸß{òÉ'·ÆºþùÏn›ýõ¯ ÀFtN»þl¯.Äi`æšüýÖ32+¸åOëíàÕzr*€¥rÂh® ØÐCåULe)¿þêØ¬RŠ@f7ó° Ø<`MQ3ôp0¸o|lÔ-Ø(™Æ`ãœ^€ÍöÇÝKM×)`5)’f öVëÀþýzë¹ù‰Óøõ$½÷ÝïÎعsç¶À3L;n±…ûç·¿=uŸòÞ}÷ÝnÝu×±`ËÛé0äg#pHØ•7 2é»üúp¬jZoKÈ`ZM0éBÜ2ÖIÊ]F'߬ äè÷1°z¢7þ< •á© òµ{ËŽ­%›jXläýÀÆ9=,Ûtð¢}q÷ržNìç?ÿywH2K±Ö‡œDcï¼óN÷‹_üÂ=ùä“­®½O?ý´{æ™gZ‘R9¬’¼›'“4Í™3Ç=ÿüó£iÇÍ6sãÿîïÜ7Þ8f»åÑX\¬¡`Ë_6ÒaÈÏÀkóÖ¤l¸š‰Í£#ˆõZqÕþ0²² À¶…É´Yˆ­p8ËX ÀÚx×°k±ÿfÆ&yÒ`¬þú“Eùohºl&qÊ"t3çô°lø°?î^jºN¬¢²’а{ðþû§¬ vÊë_ßšµXiï¤Ë±¬&wÚaÓMÝÌdûÛ“1¯ú&-é#€ #·Œ-gƒle^llâЧ3„«²l8!e³D`³`Íú¤ÛtÙö7P{›ÎBœ–Ï¢°~¿x?*+˜µ.ö×k«ÚŠþöËXX6ê&ìZ&6Îá`ئƒí‹»—ótê`~øa÷{ìá’>k-ˆ´Ür­®ÆaöÝÓ¦¹‹—^º5^Ö’²“'MrIO´ØžwÞyîÙgŸ“4þV‚-[ÎØÊ\Œ‰II‰éVóI Z÷–ä>PzðÇ?nôr0Yc`å‹û>z‘lÚ2šÆ#,Ëâ"°D`ÛÞ0Š~ÊH,µ$­<5•Á…ß}#¤ªÛ7k lí—§Õ©má¦ÚÖ9°ÕüЗ-€sxX6|Øw/5]§NVÏæW\±¦]vY÷ÙO}j ÀžpÜqî£ ˜úð‚ì~I×âÅ#ÑYEf³6ÜÀ–³A¶¬ÑÝãú`å[—_ßذ·¥1‚qDZM°l¶aÛßôÙ‡í¼z9‰“º hÜa%Ÿ~[U7Úa.'`í žþ–ýÈ+³Ç²è‡ãØ8‡€`›^ÃÔ¾9^┺qάžç$Ô‡Õº°“'Nt>úhk ì·“hêîÉwXDb'¼úÕî£I´VùC°}_ó“Ò‚SK[«¬¿Íþg¸ßtßvØfz+ý°aoLlHŠXýŽø½-íëB¬ý6Ó°•©úýe8ýãýãšÈ½Øiw˜¦.·®µV£» 4Ñú­MÀ¼nÕáÏ)) çì°l7`‰2ãî¿P§ý8¨5Tf™eÆ»éûìë¾ræÙîÑÅÉ$H%—Qñ«`™9Ó›Œ…µ(ìç>ýiwûí·»-VYŽèAªàõíIÇzÈÍúÌgZ°«¨­²ÁZûU@)ÍŸ?¿5á£&… [ܦØH‡¡ælý°~o̬ž“òÏ´>Ô†ßÓ&c²ub³z\j¿®~Úžµ¯I¼ÀÖ¼ŒN“.~¿¶Åö»ãÆý¿‘— «~b§¤šnû¯:6ÎÙ`Ø*àˆ2âî·< `±~Ú~Ç݉'ên[°¨4ÌV°‚ÕÕ’nÀXmÝlíµÝfk®ÙŠÊZ„Õàõç?ÿù(„êÿYÇïVK¢±~Þc“ïŸJ†¬À VÓ’`÷±Çk±Ö©U·CMütÇw¸Ÿüä'nrZËöºë®sçž{yºÇì_¸èwÈaG”Ö?¦Žªó°Íô]ú 
`ûÕoJ»»°SË>4Ϻ‰ÀÏ¥¢åØk’‡úZãÆ%C~ÆU¯y¶Ê~O6Ρ`تfÊ‹»÷ÒtÊXf7ØpãH]{üB0UÀ ÷˜:Õ]1¬Û%0{^2+±¯{&‘×ÇÜýò—¿\"mÿÆ7¶&q²ü¶,Ï=÷ÜÓÊ+UV“C¥¥^¬àU/ô·Ÿì€m¦[À>Rʯn ”iG—6ʰX¶Ô&€]1yP¯:nܳ#XÍ`GªIƒ¥–Zê7rôät²5Xî5¯q‹/Χ¸‘r¤‹|ŠæWÙEŽÑ„«®ºjt“ŠæWÁE¹þúëÝJ+­ŒÝqï¶I«­6&òFbÃï‚RAoL7ã<€½å–[ÜÁï}o«[Þ}÷Ý׊pþìg?s>ø Ó,ÄrÚ°š8§Ö-Ò]–¼K‚9]ˆÈRÙË›É&qºzܸ/Ž<<4èšOM °qÎ6 Àv\wŸÅê”°‚(ÍHüÔs/–~1ìÇ;Ìm–D@”6&5`ÕÅwó¤Ë¾ßuXãaßöæ7ç« V«¥uŽýèGSóÀ RÛ¥:&qzìÉçÝvS·wŸuRáHzSzþì÷Ï `¿E°Ž{À½#Ñ0¹U:.§Õoʸqû9¹o4Oʃ?þqßù·½ô­û­n€í»ŽÑ) `Ó¢°Š¬î˜Àé±GÕZFG>æk¬áÔ]Ø¢¯»%‘×óÎ:«µÞ«­ {z¿ï·_k[Ùd«zóRk‘XÍú£uSò°•¹J°ZËzdvÒ+Ó‚bí˜ Ì#9øàƒ£øC/²Uº° †ÍγîJ»}ÖÙ…XF"ˆyƒ!#1CÓq‚_ß(wÛm·Ö›˧ïaWdÝÚ®r|hÖqÚî—§òµÍ¯'­{»cõfEŽÕfk¿ºKûu©]:Nos¶uÆ»°³ƒUâÀVâH°lSœmÚQ¬¦i™µŒÎ[¼±°šœIÑTEXL–ÊÙmÊ÷Žwvç%pjðzTi=þãdÉÖ…Õ1ŠÂj¢§¼I˜²ök *ù-<ð@nª`ûÑ>ØÊ\¥Xë‰é÷ÊÔÿEýá²Û€mk»3’½sò¬»o6Ƹd°–Oݕ̸}c¶.È‚LƒIm3µh¯àQ ( 4øÔ÷pRmK„oåµz¬·_‡A§¶)OÃÚ¯m*˺Lûoxì8«í±7OÑ›9ÌÀæÝZÝÝO6Î`Ø~tÌisÜýÝn'u÷½óÎ;Ý·¿ýíѱ°ï[f7!IЦ Xç&ëÂ~rÙeGáõôñãÝÁÿø­õ^ý4idžc“cNf÷Ç~¿é¦›ZÏyÁiLÒ ú»îºËÝqÇN@éøýèGîºë®sçž{nËÏPV{`+ó3 `ýȦùèE}ãƈɓåk°C °lí@LÀFiõ@Ð6?’Ž›µ<F•oà˜VW€õo Exõàña4¬ÇÚï×æÑªrô–¸S(-r<[Ùƒ£TAlœƒ À°ÃêÜÛygE`€oI"­šˆI“8}âŸpïK uÃ$ë/±#˜Ýc›mRáôÃ3f¸M Wܵ“5bŸ±Ðêç3€]´h‘‹Ilûßy¶”ûvÐÀ¬È2×zD†=1}?ÚàTÛìÿ°Ëo°òç}Ÿ^.ùýˆòÇÔÀ¶ëù©ö…½?ý2¬·§~'tNa¬ˆO_6o—ºGE`·J,9ù]¯æSgâ€Íz;nO{âo³lZ×â"¾ £´aYªÏ7r¯›CÇùÝÃpYC,r[Í=S¶€MCSýV­š¬cYäSôuƒÜq§‡6ê3l`Ø/çÛ`/ºè¢VÖŸ…øÊ+¯t;o¹¥Û-Y"ç;É3õï“Û‚Ê´Ùo¼ñF7yd†â£’ˆí§ΛI8«{©.§3&Íþ­`ËzK7ðëÃîA¿‡¤|ßü®È¾ïíûñ!K¤}·!~ HYPÌê³^—i=?ýà™õþTY~ïPcëÉ©ýáÉ"~}™¼½ØÊ¬_Õ °1¡ûªVUFaÆ£rËt!. °vùÒí34ëB\Æð:9€­ôÖ)\ À°q6Ð/F;;»žíváÂ…­(ìj÷wîÄOt÷Ýwßè,Ägœq†[1‰Æ RÛAéÔM6i­«Éž6Hf2î`ï¹ç›X¶°ƒPü€Ø´^ŠEÖ÷“l[;€3„Aªpè¢ß+3ìù™Õá6®;qT'>Ö±l‰Yˆm§volÒ¥Pø0z›õ7#3ó7 ~;²ÆÀXëzÐÎèØâ¿ÔƒpçìÒ…8?K6ΖÌfëôºµ'·Æ7ÞØ}öÙ­%o4V(€½øâ‹[ûC€}ðÁs—´Ñ’7'tR+‚«±³ïL¢¶_ýêW£Žó—ËÑøUùwß}wt`Ø|–XÝc~ò#Ÿ1Øpò' ýó{<ÆD`•'¬?N$Óï•XÇbYÔVe¤±F7 µ]™l €µn½2e`—2€ß¯Ý¶ù³ çl¸”õg·È¬?¦V€«ýá›—°Ž4à »§µÕê$[ÃOzƒ«`ãœi€<ãî•~×é+gž=:©‘ž¿z}íµ×ެ"ž»î°C*À bó’ží“’H­f1¾7I›®µÖÇüÛÉ'»n¸!³,í“/ (-’˜Ä)݆éB\™“2P+è“?µŽkl6XóÛ­Ü<€•ŸîO´v!ÇÔúœàGWÃ^˜Æ4leö_ob{û¾i AQ0«‡™šö§½igH6™’_†cøŭ7H€Õ9úÝ–­K±ß—Ÿl…ÜGE°qN9 Àö;˜Ñþ¸{]:ݶ`‘Ûvêö£ »l2^õðÃo-}#€7ož[°`Á˜.Äyë±úû÷ß{o÷õ‘õbwJ¢°sæÌ³ž«ºï¶í¶™k¼^ýõ-€UŠ$€í²{2P›·¾j ¤ A 'VX 6…Ýóg¤ —ôô£¡l…wEc`ËhƒœÓBÝ‚M@ÖåXF®'n³2²ŒIùU‡•vOȪ#loxœö«nAwÚ9ªÝiÇt»c`+¼qJÀÆ9µ, ÆÝ+ƒ¤Óœ /qk¬±æ(Èjb³ÓN;­õœ×DM66f=V?Ï¥—^:º$f5Þy«­Æ¬ç*€ÌV¬I£ÒÊ6€U÷æ" €`K¸ E:€õÇŸZ*œ IÁ/?àå÷¾”Ý`õ[ãó† {4ß<`­üv3 °EL<'o¯¶Û°Fù,±D[áS¢(6Î)`ØA3Î%î¾—NO=÷¢;þs_pË%Ý~åˆ*m•ç÷¾÷½Q€Y‹5ÌóƤ밺k,¬ÖÝy‹-Z^åÛ.;;Ù®—¤m’ï+&`«mú¾ö„ ­‰šü²¯»îºVVNhÑÀ°eý…ºë%Àâûw×÷õí%ÀªÛÀ¬ªŒ›.ÄõN/oT¶ª»¦\9lœó À°€^ܽ2,:uÎnå•WÙµ×^Ûýû¿ÿ{kll‘tÎ9ç,²Ÿ_j©¼ j¥ýD5ñËüÁ~ÐXM&U&ÝtÓMNKñ„Ï=÷ÜÑs–kž']ˆËùÝ> €è%ÀN«¸ÛÀ4u¸5ÿÑK¸¢îîß<l·íË`ãœr€VçžóÎþÐøXÁÏÒK/= o~ó›[ËîÜ{ï½…Ò7¿ùÍ%@Ö¢²›®²Š»ñÆGËSùØ[n¹¥T`Ç^S¶·~HVíl÷}ð¦pKâ¾þ.ìôäQ‰OŽ,›†¦ê–¨W‹|ŠsóÍ7»wÚÙHq6ˆNÍÕIãc÷xÛ^£ûªdá|à­è¨ÆÑIgŸ}¶{Ýĉ£< bOO"²í³Ïh9×\sM `u•I,ÛÎ Àvh§S“ãÍ+ƒ,ð\ ž»°yöÊþØ8§˜l>ʰq¶ˆ®Ns¯ºÞmò†7Ž‚ì„düê§>õ©ÖÚ±E’Mâd<½¤±°É,È7ÜpC«œ«¯¾º°?N&¶)›èBüŠm¦KÀ°uX& À°uÜi×ÀÆ9Ó, xÆÝ+èô’ûÒig¸•V^ydßð†7¸9s渻ï¾;*m»ÑF£; b°ÿ´Ì2îÐ:þª«®j¬ ´“ÄØ—m€­Ø±¨¨8€­È”ÚÀ°lwZÅu°qNù†mì¶ÛnªÛy—]¢Ò«_ýê¨|VÞRIÁزËó÷¿µ[VkMF¶_ù5®/6¿ò=fË-·royëît!Nh oð4xtñóîșǸW½ê•ñ±o}ë[Ýõ×_Ÿ ±>ÀjyE_úЇÜwÜ1`‘í$]|ñÅNcv“ÇJ+ «°;À°™Û”Á΃Ôº×q{f×ÀÆ9Î =âÔ=06]úýDçU™—^Q,™cŠÖqI‰6=Fc‡Õiæ¼ãî½~×é¶‹ÜN»ì:f|ì‡Õ2:Z'- `ç$@¹ÛòË»7ß¼5[°ŸïÊ+¯lE`ÃeÓÁìô¢ÍàõøÏ}ahïE¶·~HVí,[‡e%K¶Ž;­â:Øáp¢ûh?vÚï6pñ¥Wºu_¿Þ(0&¿½îÔSOMØ·ØÂ½.?û•¯|%u¿¬À¶hš={¶›_.€€-æû{ÃMw´ºÍªû¬9‡CmûÈ3[)ܯï!ðúãj‚iy|çÓÊ×_µGU†uãÕ¶äò·êSÕ¯ÛoÛüè¦"r&¦Ê—6VTgǨ ÕqþE—f:ž…>4«r©( ¬ÓœYZÛ§6«µ%Öù ë ÏSc¶˜­ÅjO¾ÁÔUKÛl±å›FtÅWtÇsÌ€UWâï~÷»îÝï~÷˜5]œyŒÓDQØFœm°‘C~¶Jv$˜µ.¬¡dµ–ù&V(GÀn•yE¡bÉŒÝS€Ô€spÌ4€Õ_?òªý‰ŠlúÉÔhÛe–óFhU—Ö"ª:΢ÃV†ZyÂè¯ö‡kåtÇ8Á!ÀZ”Ø?–1°Ål-Fwò ¾¦gs[Éëüú׿ÞÍš5«=þøãÇtÞzʶNCaÅì€tò³U°ùU’2ˆXôC&)ÀF^ ¶˜£cQİk°  ч\ûߨúpg]‹8œeVå« Ö~¾ ª5^Uðª6i9 `CX`‹ÙZ› ï`k«ñ±‚¬¥—^z4"ëÿ?aÂD÷¥ÓÎ\K.QÀF: ùÙØ|ÈQŸl}ZSSE 
°‘B°Å_H«~wa`Ì$ErÖå8œè)Fʬ•+õ!Zí±q¸i‘å¼ö¼ûçž6Ž€-fk1º“g¸4]¸è·ÇÛö3>v¿¢»pIpµû€tò³°ù‘£>Øú´¦¦Š`#…`‹9Àþ$N6I“uµµnºi3üú aà*hL^EA5f´Ê.Äayþy´é2¶-„Ó´(.c`‹ÙpŠ^Y6 —^ïÿÀ‡œÆÉb'Û é0äg`ó5"G} °õiMM)ÀF Às~ÂetÂï6y‘àVP§¿iÝqxŠà†3ÛøÓ´±ªæ¨–‰ÀZ”ÕÚ¤º­[³MÚ¤r-ù³Ç8È!ÀZ·d§±Íö˜òÉSÌNÑ ½°x`#†üll¾Fä¨O¶>­©©"ØH!Øx'G¡º ûë®*š*@ó£—ŠÀ¸¦­kå"Ó¢žayiÑδõeý(«þ!XíDªmþñiãpâEÆÁÚ:¯~[µMå(éÿ´6ád³?ôB/l z`#†üll¾Fä¨O¶>­©©"ØH!Øê¡³( Æ”Y6¢®á²=Y‘ã²up\oì ÝÑÈ·6ÒaÈÏÀækDŽú`ëÓšš*R€€Íwnºá ýHn7êˆ-ÓºöZ·gë-¨µ¨©¢¹Y©ÝxÝØ6¯7vˆîèŽ ¼Ôšá9ydÊoàÓ™“Óå<ù @#`qhDØHµØúØ&v¥µ.À‚TµºF˱`ë·€ͱzm€tȆý¥@À®ËÛ«þºªÞZ6ò°õ:J8¦è `Ø@³l€tȆý¥@ÀN£Û@]Õo-yØf9R8¶\lÀêµ6Òa  ô—l]/Z›(0}$!FŽl½ŽŽ)zcØ6Ð,`q•P` ˜šœÕ¡ygF6O!ö£@`›åHáØr=°l i6 õ©óÚ¤Éàš21]^[Ãýlš„5)ÀÖ$4Õ @• °8ËE=òc3ØÀðØ€&tKž9K¬+Ú€fVמî;`«ô*( úK¶¿®­E–ìð8¢ýâLÒNlh– (›·\‹S(Ð l¥7t¤Y9'=1Ùÿ"ågª„>ýx×4 Íl³Ew®6PÞ-4ÐtµëúÚnÿí ïou™Õß°Œ´mþ5StŒ•QôzZ}VŽ-i¥rl[V™íÎÉöåé’VvÖ¹øç À6àÞû&ŒOšðp¾úœœS üö¡OïïZ€(P‡lyg¹¨cJ~´ÆºkÛMÝÞ}䈙NTJúßj¿m·<ç_téèOuƒÕ18c´ å¬iœ§_nx=í«_ý<ª[e´P•¯±¤Vþª}ÖnÛîC¥öûçä×qæYçÙ'¸( ›a~+ÓÚbçMâ:žÚÔ(€(€C®Û]‡`A_l >0p´É„!|ù°ªk#øòAS&ÈÓvíü¨ "µÍÊõ'-ÒqÊgSg l6`Ü`U·ÝvŒþªL+ׇTw+×ÚÛ>}·óˆµÍPC}7}LCµ€r‡‚ÓG@@:`ës®cEòqM°r6 £žiÛ|} (PH¶\dîX€¡[N!åb[uÛ€àÍŸWõ‡ÛoþxQ}÷#§‚Î0’ªnÆiåú³«.¿lAŸÊñ—»É[FGyÕ¶p‰œ°=ª×oEdÃ:ý1¯Õ6˲AÍ»Nªß?WµÏŸYßÕžP¼r›²€ío¥m<0iÅÜ”–l”l#¤Áe•½7Õ“!…0…þjD°2bÒ©I’Q)ÉPí#COëú«ãtŒ},šV†ò„X+W7Œ}ü6¨åñ?Ú¦Kç¶¿¿®­­LÈhŠI;°El ZÈêj\f"§A¾6le.Å $¿[=-äOûùéÚî3€í¯Òç`ݺt~!À Ê`·N’ÆÇÊ€mœì)°hÛÌø <÷i³þ† ì¬ÀTuø€jeÐZ;üÌàØºøðÛ%¹(¶`«uÙÉãܰl ¿l@Ý{å3øÝšý.ÆŠœZd6í¯ äkÀöƒ·Ò˜6ÊŸöƒPj˜ñ@´Ê d•=¶¬rC~\Àj›ÿÑw¿{þ÷¿ .}×ÿáÄPaXArVt5Œ¸†m ¡xÈ/%§o °ýå²ɹa‹Ø@õ6 ˆUw_·ªn̶̎`Ö¶§ý »Mêõ`ñ‰ (  Uèû›/¯¿ TÙ'-¯Hâ ,©, bÙqY½&C€ ™CœàÃtØÝY|¡mÖáy¬ý¤@,ÀúaFb­}öæF†¦·£úëÂíÖ5! B-¯Êõ“òú†Ë[›~²´Û ÀVï0ª“Çya+Ø60ˆ6ÀÖètôU¡_?mFufaP*ô½­¦ÀVÿ+éÿ|³zMúå™ÏoŠZÙÆú®Äl¼nüêÿ«Ã¤*P`}ƒ6ã²î»EÖnŠ°ï½•!#Ôÿ~òß°uª,é :¤œv `±6Àâ TÀï )`´€‘ßë2ôùU… ù « »%gõš4_>-²ë7´ò}¸Ö6ËÃ0‚¼ß³—X34…îýèlÚYi”Õ…XûìMÍòž˜iÝŠÓn.£‹“ëä‘[Á°A´Ǩ òåmÂ&ýoC}ÿ=mük5µjÃíYA'‹Ì†[•c]‡Ã™~OÏ4È-xêdïGʬ½ »Hë¯<úXÞ´YŠ}CÇÙªm~ÄUå„ Àö£Õu¹Í,é :¤œv `±6ÀvÙѼâ ågË··ˆ¦¦ÔR~z8+q+>»Àj{ØÓ¦Àž-F‘-›c™Ã š HÓÖˆ²·&>dúåøoUÂÙÍBƒ“1†ogüáÚ-]ˆ£.ñðe`qòb<òa+Ø60ˆ6ÀŸïÓáÛr:aIkÝÓzGZ/ʰúÐGo¤q@V÷d¿.¶Ã Ïáé „“9•Ñ©Š2ÊÔË1}ª‹C:ˆ)ç„]û60,ËÁÔe÷Zr§®ºê¨€íS¦·Í¶ÀS´2HÕþð3-Ù .½~ *k§´^“>؆KfZÙþÊ& šù3°½µjG¨JG¿‘:°³^Ù€–‚é÷å`´ Ž’ixò)³Ýù]Ú3ˆT[´¶l¯®iÕõ°UyCUŽg¸¦@R ™ÖSÙØX`ëöë‹—µ¼aD7,Û¢Á–€*ådQ`€`‹ªAÊæšdë­¿›{Õõ£°¥h¬°ImÌk‹­ÑjùôÝÚ¼ãËîÏÒJkËJײå6í8v€œæžšº!§ +¬¢Å*—™†«P’2P©Àø &>·pÑ#ãˆ4Í1¢=À6Ð;H-Eû ¾B€­Ë¦Úi¾¨«MݨçâK¯tWYe~#ŸÒ4 PPP`¬K½êU{ê¹Øß÷ÎÉî†CF™\Olà¥V”ÒTªÛ­¾ ¾gÖ VÙ´q²~7Y‹Ú*Ÿ{ßË—øíô÷û‘ß"×Cåúu„«óð»ëÕeuûõª,µ]å èÓÚ¡üVŸé¥•î÷®Ùv.×Þ0ÏM˜8ñaü@@@>P`¹åVx,°SĹ&/öÒ/6` jípi[òÓì x!è*¿ ÏÔêÿ1³µMÇꯒ±ñU>ËBžŽi×ýWàhuøõèÃïvn~÷bM¶¤²´O€šÉU;¥‡i ü‚Ø,­¬ :ÎoC¿ØDZ;ç\x‰[i¥•nêƒG6MD@@Xyå•ï-%èg‡…¶aØÀàÛ€`,Œ:¦u‹5ðôaTpæÃ§AåO¿ÐIý58ÔwÖ§ßVÁ¡Ý( þ²ì/¬× 4`UOø[–em¶h²µÙ¬¶Ú9¶ëBÜ]±³4?ñ¤SÝrË-÷5<@@@>P`ÂÊ+_{Ö9Ð…˜.ÄØ60P6`p.ù’^‚Ïì|ð £¹‚!®¥A°êóSDg”µ9 Dó6Œ†ú@í·Ç‡Ú0ʶkXö#GÎü[ò¸>®Ù4PPP Q`ÆÞÓÿñOD£?Å5æ“ >•ŒXFÓÀ- `ý|úßïšlÝuõ7vɵ5­Íi³·Z]g+Ëo‡ýoà¼(ê°ìFoòb¢ûT<@´ŽS¸ÆS¿žÛÖ^Ã5ŶևêÖÞýªÑ0¶{õå—_á/ÃäØr®€606Û…Xö ¨§iMƒÕ¼l^43Ææ² » Àf•å·€}ÉÝÿÈ“nÙe—ýã0>ü9g@ÁT@k( ä TõfW‹ÛGp®Eù ¹š}Q³0Æ8XäÇŸëÌuHƒÎv i,…“3I kº„c` ³fúµã”/mÆcÛ﫵HjXwÐf•^Ó´6«ë±µ¯Vƒ2‰Ó—N;í¸âŠç¹Àé£ §&çrë"°Ë{çsà Ð)r*e?~üg;ü£/ ‚ÃÊ9^Ø6`6àÏÈë¤^èªË°’?q“ß 8´#›uXeÚŒ¾!èÚ̽‚;åIƒ¼FÓÆZûüY‹ŽU¹‚i+Ë–ÉQ9~·j[VÈŸ9Ù Ü7M«p‚¨~½ï¶Ýnªº¿­Ì³“c«€4 BñéÒZoª¦V) è«±ôU ú]róSÔŽ)z¬•mÇ¥uûõoTåK»qÓ¶é1²¼«7øûÇ/¿ü„ß©+U¿:!´hÁ°Ð²"ˆ“jKËøÇØüÉœü}ÍÜÙLÂiëÀj¿« 1œImÊÖ¡2ü™‚õ»ï~»mW§Ú•!VYvNa{³´*29USïÍç-p'NüÅà?â‡ê Îôþr+(ˆåÓlŒh©ú.èSdVIÿëfi÷Ñ…´côWoDõ7€÷Éï—¡2}°Õ>ÕÕ_«#ìœäí ½²Se¯H¢°ÇÌøà!nª£A»€l(jY³úf•c3 §íOëŽ\´=ƒ’¿NýtŽ“×YïÉ3w«^=w©·+ 0<®:Y-Š–8HC0‹ž{Ïó .šaÔRÓ‡GƒÆ¬Éì"¬O+p†Êë—ouú«¶ ¨-Ÿòh›_O¬ˆhYTñ+¯òÚ§n[°ˆ(,Ë©`ØÀÀØ€u«´.ÇþxRETcÊô<ŠÖúkäöãùιð·ÒJ+Ý4 Ïóa>-zVwõ; r•…ßêZ?¤% ü$¾ÿ±mþ8Ríow³XÄÖ/§ƒP9×Åáw«+¬€Rc.pÚS7ÝlóžzîEœ4ÀÆÂ¥t²€K€š5ÁR»}ýp´9VÏNêèæ±.3yòº¿Jžëx>’µù 
¤ùîæ7[¯IùÂ1Ý[5„Ðz5ú‹¨ žv¬ê÷ƒbê]¶ÉßïÀ,Ÿ•áž‹XÚf]©­÷¦ßë3M¬>ýµÿ­Çh¨™_vØ3}Ô†AšG¨Èuïi^3\¿Úæ_Lß ³–Ú±ˆ«u3–Ñ¥EvóNVå[´TUFÀ*ŸoØlžÊì÷Ú×¾öcûî·ÿ‹Ýt(›® Ø6€ `½°½ Õ‹ÚäqϺ¯ƒçó¤ùîæ‡[ïI}ýãP 4ó³m(`ìÐ?•gð*¿_uªmJú ;Ñœ’ï®üúÙCÈ i ¼Ó¼ã|˜LØ´à˜Á©öYO«SõQi|‘v-ÏâxFi·ÌŰLJݼ1³¡2Lßu|,ÀúÝØZ›´êª“ÎýÜçOúÏ^8Ô‰S‹ `Ø6Ð-Ð Z½¨mâ³—6u¬€üô0ê—æ»teU–“ÕÓ±]ƒåsgùûi-ˆõ‡þYŠªËÎÅÔ†+¦X¿·¨Î7äµÏÿ¤c7„s줭ÚR†™:6 X²›®4±‹+Cˆý¤ÝL±ÇZ¾Ð€ÒÞ’È0ý(°³¿-Ö‹¶üƒ§Àøbçù±cþÔ-'‚rqP±lÀê²E^ߺÛÛ~¿úšk~yðٜшaS›ýî·Y=CùfŠÎlœ¼iþ¸ÚàoO+#«w¨íYàèuL6lÏ$*Ë×Ò‚t¾ŽloË´·'vÁ±2ýõßDX¿që&`ßý‹®O¥ïá›ÿ´­NÕgýÞÓ"°fø~7¿œ´FÛ²º?÷Pzªn‚É[êÛqçi¿{tñó3®.g‰zp̱lh† ,\ôH²<Ò†¿]a…j³•6tM´H ïCË?öSVC ðÌwoMM+cÐV:‡Z†“Þ°]3óü‚eôa˜\GY—^3lƒUí³¾õò÷ûŠÛŶ~ñÖ‚¼õ’d¹¶8°Áv£è ‘Ÿ/4&íóaÕº+°Øs¾- mŽ &ì»ÖZ¯ûflÄk†3Æuà:`Ø6oŠºÎúâÉ[uÒ¤g’‡øVCû žOƒ&¿ZDùË:Nþ±?þ3¶Œpþ›ð8ñCÈæ“«>}:Àæu!çã±q¹V¿ÚÐIÏM6ÖZº/ìBP¦ A°?Õ7ʪÀ1ïMOV»ÓÞT•9GŽ|ÖÕrm¼É‹á"÷8QùN¡6€ `õÚÀWÎ<Û­¶Új/Nœ8ñìä=qðÓœa¢À´èòÅж´avþØÒ0dÐ+ˆ)ùãY-XÕ®£ÍÂë/oiÐjpgeZy~WàNÖ_ZS ›ÖµÚ‡hÓÍïÝ) ý@ž‡µ]ÇØG<ã×vf!îñ-© ^”"M²h«ŒÔºË b¦ðŽ­§,ÀÒ}8Vaò™SW^yå{×XcÍ?~äÈ™»qÞ¢²,µ‚ `Ø6ÐPO¡ý8è/Ë/¿Â_p½4yp­Ëã{¨°Èg跇˾ÈÿõƒKáP>Ëoðj@kÇØðÀvk3 ÛøPùþ~»¬LÛrucö#¢i5µÑ?Ö3\ÂÇ/GF‘¶ÌŽÚâçóóøu„m×qþ •Ÿ½*cìõÉÚ4Õ´#\§ê1§v“ØÛ‘˜¶êŒßõ9æò €)°IòÏq‰ƒð 9 ›oõ¦wßs¯?sìñn‡vù«¾“ÐÀ°l 6`Ï=svÚùþ :Æ_ö¯ê)”<›MÒê<®‡V¬áD~rLïÇ´1¯e»ÄªÎvþy–^¸°yç›·ßtKk—ŽM;7†(vz9P ë ÈQ˜–¤ý“4k$½md›¶“ÐÀ°l JðŸ7ÓGž3»þ´£‚~P èŒÁiçŽQµ.¾ióâ4M“² ]åy0D±J5) PPPPP` ° OËž¤­øaÝ{Ó&]*[v·kÀJÿ°Kq·Ï›òQPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP†M[”>f};_›qrØô”ó-{Ýåü9@@@@@ŠX>)G€¡Eæý¤õâ´n_••ï’Ttø2kФ¿E?lÓB:ùh=B›W¯Ö/´5ùìü츲ºë8+£ÝK[Š¢®åt~e®{ÝÉ‹(€(€(€(€(0 &YIðQ¨BùêX«KÃÏáÉ¥¬ÖëóõÐ÷˜àÕ?. Ô¥§AòûkÚñyœÖ;g•‘vÞvŒ_ìyÅœ{»<l§ r<          @KXA†þ·4m‚ °1ìôS'Àª½´´vçEt ð‘vþ1çîGR³"Ž‚WƒTo)XðµƒÐ´ö˜¾v|¤°1W“<(€(€(€(€(€SÀذqê^l0TE´®N€m't,Àêœíüôí>‚E©u!NXÁtŒžº&1ùÂö˜¾êšÜ¼ØÆÝ†4PPPPP Fv«ãÛuÅÕ~Eo‚"¥vÝC€Æ§ê°ãTFÚ'-kÑe+Ï8ûeø€çGcÛéiçfãgÓÖÀ2&²š¡imñõµÿÓ^<ĬÎÃ4λ®ÖiîÛ‚E¿óº۵ʻ¦1öL@@@@@V `ÃÉŒ|P ÇÏZwäP6ƒ*A§uÏõÕþv]~ÕVœ§ß7¬+ Ò ¤ÒÆûúàíkQS›ÕZç£2mÜjV¤9¯k¯Ò×Ú;k³_¯uUNkwÀ¦]‹èf½ ÇõZ~¿KtÚ˜`Ù•éâ_KìyðíÉ©¡         ø ´X¿+« ÍÿX÷ZˆÉ´‰ŒÒ¢ŒþMÁŠÊб>ü¤M^äŽÊP~g &X #ifÑA‹ÀúFu—¶Ox¬]Z4SÇØ~ƒ»<€M¹4«´óµÏ²à°^û®k^»4͔Ǹ›Îj¯?AUØ}÷_&(¿¶…/ÂóömÅÊ´¶³¡¬sg;          À(ନ`ÂŒ…$Y|à e2ð #h>À†0ãÃrxœ•—Å6öTyüO»(£lÖå õ£™á1~„Öö¥l»Yí°óÎçð¸°^]G‹„úšficZ¦é¬º ,× ðiv⃯Í-¬sL³%;@@@@@h)``•Ö­VpQf­Ð¬¤mÏ*Óö‡ÀfÀ™Íz> U °Ò)+jõø]¬«Ø,=²L7­^ƒK:³´±ã³º ûÐé·!ë……åI»>6Æ7kÝ4]¹eQPPPP†\?2h]j53èÈŒP6‹ÞêoÀ†ÑW+Ë &ŒðåEL­>Ù˜ª6¯mþøØª¶Ó¬tõg>ž6"t–6i ^ç0OLd9­\ÓH×ÚìÎÿkà òÛ–ÓG@@@@áT @;•5ð R$U€a€FqCPÍ[+×À*„–X€õ#·UlÚdNÖµ8î´óô'eÊøÛq¾-©¾pMà!»=9]@@@@@_‚ ¡]´OТ(€(€(€(€(€}ª€œú¸ëÖéÕ°±mÏƒÌØrÒòå•]G6¯ œ_™ce{U¿10N+W/jô†          @* pM‹8ÊÉ×v%ßá#xú®ýÖ Ùò«Û®ÿ±®ÇV¦_nÀj›€.­ ¡ÌYP¢c÷ÉFä Ú­µßÎYÝ™­^¬ïaç°LƒC;6ŒjçÁ£¬Æjf•¡ÓÑ9…Úøã—õ¿¿_åéÓîüBMUF– Ä´!Ï.T¾ÎÑ×Ú·3ßž”ÏÎAuÛ>ÿœÍòÊmZ:Ôï(€(€(€(€(€mHƒ*sòmR%¿Ëe˜ß¾ ˜¦ÀU½‚ í·ò*þ¤Q!ÀÚ˜Uƒ}×ñY‘3šÃq½a¤ÏQíW»>Jj“¬YëGùtžvŒÊÌܬˡsÐY9ºúîw±¼úÛ Vý±³~—pibßîüÂ6L›.þ¹Å´!Æ.¤•Á©þ·ë§òý—¦¥¯µo¶ß€6«\£ÙZ¯&,˺ölG@@@@ˆP@PNdÀ™vxÀú3‡yT¾áþ`Ó Ú %ë”BX Çõ¦lڸ߬(i À¦A ݱX¿œÜÓ@^0&¨•Fú„K¿¼¼6(¯iFÑ­œ˜6X=íìÂÚêÛ†¶©üðÚèÅ‚o§i܇ö•U®¶§Eû#n²          4A‹´ùm18оp‚,€mI!L†€B…uµ¶Ùßv3‡e âböIØœ”·S€UäWeøHkC<¦]‹p[xiçgÑgëÚÝîÚ¤Ù`^;cÚVFÚ¶´²´MÉ¿þ¶Ío¯ßÕ:<¬6°MøÕ¡ (€(€(€(€(PR0²•;‚‹Æ•Xæ Nýî¹i«|Ú¦¬Óœ–ìà ¾Ó–ª`U‡ ±×+t½lܱ¹ÌƒS›—§€•]†×>ìö À–¼é9 PPPPúU!ÀœøÀèOF”%›‘ %`MµÉjp)pµI¥ü2 ` " ¬ŠD?cÖô·ëam ë1¬]v.YçjBª´ _bäµ!Í®Âm>¬†Q~ßFt=ýïþqj‡?Xç’V®¶3 qÖÝÃv@@@@èlÆ_|´¨¦J€µñ©¯únÑ3•kìG- ¨ú-…Q¶P^ìnY´Tû²"t?Vg8³­mWû¬»°Õ.;£ó,2‰“ÍÎì·=m[x}ÂÙ™ývø/¬\{¡ }Y‘Hp;_•é}^b#°þuöÏ#,_í°Ù”­l‚(ëC|V¹í&(ëƒÛ•&¢         duÙLë’ZT-ƒÊpF[Õ™µ,Ž_‡ŽëEwϬxóڢ㲎-ª]^þ¼ëÓ®­±mÌ;ß¼6äƒío§wl[Óê ‡“Ŷ‘|(€(€(€(€(€ P mÒ£ªšeã3}h˜–ž¬ªNÊA4ÌîPPPPPP ÏP4T~7>ÖåÓïìwçíF”‰¡ê–œ× ÕPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP KÿäE­mžWòƒIEND®B`‚nova-13.1.4/doc/source/images/rpc/state.png0000664000567000056710000011321713064447140021640 0ustar jenkinsjenkins00000000000000‰PNG  
IHDRÛˆ `JmsRGB®ÎégAMA± üa cHRMz&€„ú€èu0ê`:˜pœºQ< pHYsÃÃÇo¨d•øIDATx^í½˜Eö÷Ÿ]`v‘ØDfâîBˆàƒ»[°Áƒ… IAƒ†¸NŒ$$Áƒ\qA–eq]Yvÿïó¼¿ß{þç[ÝÕ·ºnÛÌô¹sç„={çzßêoõ§Î©ªsþРAbóýۦѶ þÀÿ9ÿsnÝÿãWãåäÜÜÚŸ%÷¥¤¤¤¤êS ü¿ÿýŸÀŸëГ!K»6nLcÆÞB_|ù} û öµßÜçðš/¾üÒ±/üö9ß·/ø5)ØçüÕ°Ïø½Ùö9?–À>ã×TÙ>ã÷Û?øñDö~Û§YöúôSÇþþ÷Oé“ ûäïô·@û„ÿ„>þ›iãûŽ}ô±mÓ‡ÙGôÁ‡Aö!mý Û¶lý€²m+mÞn›¶l¡M›ClÓfÚĶ1Ð6цá¶~ÃFŠ· üš ´n}¼­]·ž’Úšµë¨6,éñŽnÝúõÜ&°èvAÛE¶ñF>lçiÓ&>¯°àó¤‰Í¬[K[¶neÍÁüÚÛúÁ Òó‡}”¥ÿ¸?|ô±¶LÑ}(sk÷3§ï9–Ý?ûíßtÊ)§Ðÿý¿ÿ—þçþ§F쨱ï›xÙyô?5øÛÒlÃÓ9€þýûï4zÌXÚeׯ´MÃF[µS«h´í¶tÛm·Óo½M÷?ø]3ê&º–íºQ£éºFÓÈÇ(Ãß׺Y=wÍõ7Ò5#o «¯EWÁ®Ågî¸özçq×®ÆkÙôûðÞkù3<Ã÷ÝbêX²Í÷~ã³ÔwĘ>óV+Žvå5ávÅÕ#)c×ñߎ]~ÕµvÙˆkvé•W{vÉWìâË+»l„g]z%]xé»ärºvñeÊ.(»”οȱó.¼DÙ¹”);û¼‹èìá*;ãœóéôsΣÓÎN§žy|úÙtâ©g*;þ”Ó験N£cN(º÷þ4d¯ýhðžûÒî{ìCƒ†íC» Ý[Ù€!{*ë?xê·û0ê7h(õÝͱ>‡Pg¿AÊzôÛº÷HÝú PÖµ÷êÒ«?uîÑWY§î}”uìÖ›­—gºö$Xû.=©]çžµéØ`%º*kÝ®³ÏZµíD­ÚtTÖ²MjQÒžŠŠÛSóÖíÕyÅyî­Î»ÖAWÖ¬[ïþÔuÒƒ5Ó“µÓ«ÿ ÖÓîÊúî6Di­?knkoÀà=ià½X“{±>÷V½B·Ð/t¼Ç¾²¶R­Ã ýývìîrßÐv0÷—C¸ßÀå>t÷¥Ò£Ž£Ã>^Ý¢¿yvÿÍvô '+;æÄS”{Ò©êúܾc'5p{â©§ÙžñìIþûɧŸõì)þ[Ù3Ùöô³Ë)Êžyö9‚½¿f-¸ê*5XfùsôìòçÃí9~.Æ–?ÿ…Ùšuëèá Π­«VÐê)+[3õaZ;ͱ l]ÛÊ·°Ù>ž6ží!ú„íïlŸ&° ×á1Øß”á3ÇÓGl°meÛäÚ¾]çÚÚ©ãé½)ãiËsOÓÉûïEソ†n¿ã.ZõÊkt+ó\ÕάŠƒÀ·Ý^N /qÁè@R€4 åɨT-`{• X´€*À0^wÃÍÊF²]Óu3l,Ý0z,Ý8ú–`»™°nâ÷Y6 Ÿ™À®¿q4™6òF>&Ã0˜ˆ²0GmšøH¹k í+î«Öl—¸VA9ÎW1¤çÊ.¾`¾R™ó%WÐ_îÙye—Ðð‹à‹éð™ç^@§Ÿ èžK'ŸÁÀ=íL:á”3踓àjвê"À=Œ/¡€=8°¸èàäö@ê© `¸â‚ÃÅMÁ•­¯ìW Ø^ýw÷ 뀕/ l€«lÏ~>Èâ‚‹ oæ" ¸òE™ €mÛ©»g²Åí»LCVÁÕ,àªÍ„¬¬†¬lã¢â@ÀšpÅßIL Ó°Û?ýµ1Áâ^d bÀæý`·àA[3ÇviÎ fÛµ9×MÃWà5áëد†/À«á« ð–tx»1x»+ð*øò@JƒZx»°F`]{õóÐÕàí3p°Þ=¾¼{zàÅ1 ¼\Bÿlî{Сt^ ߃x€ªÁ«¡«Á è¢ïyvì ºG’2€÷õ7ߢvØ>þäSlO+sÀ«¡ ððõÀ[Iø¶—_q%Ãöï ¾ÊºQ–ä¤Íyhøé´å¥èÝGî£Õ®½Ï·k'Àî¥ ï¥l›Ù¶Nrì#¶]û„o“Ù8~]¶}ÌiûˆÿÞʶ…móÄqü½ãh=ÛZ¶5î§÷ÙV»¶ñé'èø}ö¤Õッœ 8CsçÑíåwPCŽ{ž-\Þ7ø$–úVÃ3ã¡2ˆ]¯Ïç¹´–«ËÐ °¹`U@s Ý<ö6}k9¹­œÆÞ~ÝR~'ÝZ~ÝÊ£Ÿá± »·ì–Ûï¤$6ö¶;È´18ÃFßz;[¶Ý|ËítwÆnå¿]÷f nÀ ‚MÐ&ÚÐF0dzǀÄñððM6ÓÏ@û:†¶ãY_v`}5]Ì^3¼e€ðpïÙç±·{.C÷ìsé”3Îq€Ë°=–=Û£àÑr'/eØšž¬öb•'ëBV{²/6²Ž'뇬öbƒ kz±Ú“5AkBV{³ ´ìÉjSÀ €,€éɺ^¬òdK2ž,@«M{µa-€ ƒ7«Íôjñ·íÙ.‰gUÔQàµçùêãÔÐ5‡®aÊóu¡k·ªÐl5p‹;Àãí¬<^Xtµ·«¡«=]íí†Cw˜‚î@ޏÀÓÕÞ.€]7 º®†.`k×]î‹èº¯¿ñ¦[×® [üm^oe½Ý÷Ö¬¡‹/¹TÁž°ÜèV¸€ígD›^|ŽÞ¼ÿNeï²­f{ÿÁ;i ÛZ¶ ÝI¾ƒ6³muí£ñwÐÇÊÊ铆×ýM½îÏœ÷ßI±}øðüÙwÒ¶®mxè.Z˶†íýî¢ÕlﲽŶññ%tÜ^C<ظ°W^}§f›d`;†cÌ÷Üÿ `Ok‡M•@ û[‡Ž•Gk„ŠMÈÂk½\V+àyû÷Ðw£;ï¹îº÷~ºû¾Ô1Œ{ࡌÝÏ[vï}RÝÃÇÙÝã Ûîw?ùì^O°áxƒ¬ü®{)Èn¿ëRvçÝžÝÆƒ ˜@ÜÂØX(dŒÜNcø¦¡ýnëß»†¹‚8›†7Âý5Bü8¯o_zÅÕTÆð<ê~C÷ü‹”§{ ‡•Oâò±'qùø“¹sŸ¨<[À!c€ÈjÐ"d¦ÃgÑÓõ¼Y+T Èšž¬&ðdáÅšž¬.¶=Ù(ȶC¨˜-È“Õáâ(u@ÛÁ¬ Û0¯Öönël“†œÃ¼ÝXè&ðtn®Œ§kC×ôtáí†yº¯éé¸vˆY{¹ˆ² ¼ àšÐ5ÃË€®^Ö!fô x¹ÚÓÝŸ§Y‚¼Ü0è¢*/×îk ¶ÕE}áâÇhácK|¶è±¥[lÚ’e´˜í1mK§Ç,[²ìqrì Ÿ½ýλtÁ…ñÚ‹iéãOdÙ²'žd¢œŽ?éÂDvÃMåôø<8Ц ޽»ú=ºÿÔ£iÃò§éÕ»ÆÐëlo²½u÷z—m5Ûû÷Œ¡µ®m¼w Á¶°m½w4}ÈöÛÇ®»ì >¦ <ÃýîMã8èüJ~ÏeªÏ‚¥ÍlÙ6°­uí}¾]}ÏXz‡í-ØÝcé ¶µ-¤c† Ê‚ímÌ:Ìá*ÏöÛ4T?°ô<$º¾Ö­ÛábxaðÐི7ßr›ò^á¹*ÀÞ3Žw¿ë}>L÷?ü=8~=ôÈ$?q2=2qJ¨Ÿ0™l{xÂ$Jb=2‘¿#ÚpaöçýÙx~<ÛðÛ`zðp/lÓƒsàßþÜf0èhO˜Š¸Þÿ-| ƒçŽèÎÎæ»õ9ÆœñE,¸óË.£sp¹Ü¸˜·=’çlÑÁ5láÍpˆ37«çc½Y†¬.òd“†‹MÈêùX´nØXÏÉú¼Xöd=È"dÌÖ†½ØÌDz»s²²6l[sè¦çf1?ëÌÑf,ȳ5çiµGk†’õ¼­ Þ¸ùÚ°yÛ¤s¶IÂÈA¯©®§ øfæw3¡å$áe ÝÀùݘ¹]g^—çÝ+^ótý¡å t1õ¡½\3´¬×&àÓ)®†®örõœ®ZtƒBË º.pÛ¶í;Л“|Æß?%ÄplÚ&MA“¦NÏû$Ëð{´M˜<•”MšâÙ#ü7l<ÿvsà  ö€@Ãÿ‡'p»MÈÿÁñ40ϺãáßÅxáh{xÉôÜÀ!|„¨]xº˜ó=ŸÅ§€Ë ¨°p !eÌ Î#jÌ×\z´Ze.~r>9ž¬š“ \ø”™—Å…)i¸Ø^ôä-|` Èš Õs³6dMв°ÖîÜldh1?èBÖ„­éÕâoÛ³pMÐÆÁ6Éâ¨4a›t7jqUeÃËt¹Ü¤sº•nfN·3‡˜ÍUx“„—íùܨ…TAó¹fhYC×ôrET™Ð²ééb.7h>!e½ ÐÕ°}iå*z˜e&òµΊßQÎËdßõ×¢‰|M²mÒ”©|-ƒe®i“§N£V¼D§žvºšOÅýÉ|ýÓ¦¯™€@›ä^;•¯¿Sgø ×ñ/¯¤;ßÞ_²ˆž»æzm…k+¯»„V]w1½ÆöÆueô6Û;lï,£÷¯/£ulë•]D]»óÖN¾ˆð:ç=xÿÅüYóg:¶ší‘Ó[lo°½6òz…mÕµ—ÒK®½È·ïTL§Ò¾=³`‹P2v«T ¶mпW^y…ÆŽuÜfx³ðÆ&†W/€&3¸¿³*x„†QFbu-´,{$†‘Ùœ¤fîð=¦Íæû¡æŽ1rÄ1j›Y1—möšÉ6Cÿ6ü>˜9ˆ° h‡),>{à‡<ƒ~Êô,kx;gÏŸ à~püD^U>žÛÁËíó€ÂùX†ðò<·«‹2V-c…2V>q̉lÝ0²Zeì®06ÃÅzu1F÷•ñd£BÅldõÜl`moVÃV×\iìz²²­ [Ó«5CÊæœ­½Ù %yºUY•µBÙöD“ÌÏVÆëMÓÓÅ C·²àÍZÉlyºA ©lo7ÉB*ÓÓÕÐżn§kÏçjOW‡–õêå 
/ÀÕÐÅÊe¸X¹lÎç*ؾþ†òlŸqO§=ÀýÛ±q˜‚C´§áüÆÑCD­èm"ºèÚCãù­Œ£lËyáÒI'Ÿ¢ òð# sAϰ7x…”5Œñœ<2‰†=ÿ‹tç!{Ò{‹çÓ³—Ÿ¯ì9¶ØV\y>½|åpe¯±½1b8½ÅöÎUÃi5Û¶µW;¶Þµu|{çðó²€ë-¯]Y{Õ¹ê=úýk®>V»öß¾uÕyôÛk#Σ•l/³½tÅô"ÛólËÙÞš1™íÕ•Ûé=5WkZj°u²\Àرd¶ün´§ÂCƒ' ÏЄ,B],X¸X…°"Ú³Eüw€Ùaˆ û ø³²Œ¿ߣm>‡BBÍ™Ì[°ˆÑ±¹|¬æ†gæÌÏÝTÌ]À€_à@}Žß RÆ×æ{ƒ›mº‚õle¨(€‚ôtƒÚôÀÊмð”ïcè"T OáfÌý"´ ‹©°’yø…«ÕÊðn;ù45w‹ER•òö7|¼—»§:žldõåÁZž¬³…Ç1„µ7 ïU Y¬6Ö+Žu¸Ø…«†¬ Û0Èšaä°ÅQa° ›¿­Œ— èÆmª H“¾6(tÔÓ5·©ßÊž.¬ºÐ [ÁœY¹ìé†Íéš ©¢<Ý •Ë&t5põ|®oXhÙžÏ5P™[…^eضi×A-TºýN¬}Ý̓imX s/Ýé3ž†â¾o¯E¹›§ª0¥§¦õøú`š9ÛSO?CÇaîVÝ»}klT0ûùçŸé—_~Qã1Óìçü]{€Úð›ÊL«CO]t–²gØ–³=ñYôâÅgÐ ¶Ul¯]r½ÁöÖ¥gÐ;lï±½»ì ZkØþûN^jŽƒ‚ò³Ïá÷œî½ï…½ÇöÎeg*{vé™ôÛ+—œI/±½ÈöBÙÙôÛ³lO³½1õ:¸{§ôa‹p³öl5h·Y°h1g‰.ü¯ Ú›…wŠy€G¨‹ó¤>Oà/]ö¤2{¢÷ã üD¦&ÿ sþðúƒiÃn¢©KÅQæ,2¸Îâ÷žu¯³à¶ÑoKèQžkðLžanš ws¾Ïcs Íž»aÊc7¼qí…ÒïÚð¬µ7­ÀìxÐÓÊhg´7<^\ßã| ¬<†çsuHádìóÅþ]¬R>•÷â»=ša{ø1Ç«…QûTÊsµ‡©½…¥û÷É&kÐêùX2Îòb­9Ù ÈjV-€²æfM/V‡Žd]ÐbKö€íhÛä¾íhÛL¶Ù}ÛÒÜþmiAÿ6´¸ -PBO ,¡gØžg{‘í%Øn~sèÁ¾cÔÇ9š_a½÷Ÿg[ÎöÌnmè ¶%ùû´¡G´¥yÚQE¿v4£_{šÆ6…mrßöôÌÈ+iðÎÛÑjžŸMͳÅÅYÃÖ e¯Ü-Àkz¶˜;„W…‹=.ú‹ÁV¸-¼R€«ãÙe?Å+Þ¸ mÆÍ{ÐöM3Ü×+Hß?\AvÈM³øsyžkÓnʧ{Vã9«ó2¯]¼ä~:‹ÐYã–Ñ"†s ¹v@lxäü7Ž1€¦ÑuCø˜®Ÿ–ñ¦Mï™%Ø3Âê¼h/e:ì­ÃÚ:¤m„¯á ÃëE{cŽó:#Á»õÂɼp ó·˜Ç*eìÏ=çü2µ:ûo±çsEH^±/‡ºö>à`­“Âc¯¬o…±µO66…áÅš«‹mÈš€ÕòfMÀzûgyÛˆ Zu߬ Z:6XàosÎÖLf[{ßmÜV š‚­½ 9)lÃæuƒ¶)Å­`Vóº!ž®½ *lë/›Ðm®B̼’‹Ûp~]è&™ÓÕÚ3W.csXr ]½rÙIŒ‘YD6Ÿ«CËI¡ûêk¯S›¶í•—¹×~*Û‰5`ûDûx°2{PÔâ%ò Ãjì>’A× ¡gï¾|[L¥GÃvt–á»AðKÎ$ˆU¾A€7Ÿ?‚÷ïÃl¸ßÅ‹@¯i¾­¸ëVz¨[+z„mB÷V4™mÛŒî-©¢gKš×³-ìÙœ–ôjNK{7§§Ø–³=Ïö¢¶>ü7Û˜ƒTÇóùçÎñiÃñàXGóó/¸¯Åëñ÷r¶gØžb[ƶ¸w-ìUDó{µ¤9½ZÑÌ­hjÖ4™m"Û„î­é©«/¥Ai˜luv$¶€, Ð…´>ز àÕbÑôjNÅÜ,¹ÃË$Ú'°_Ì2½q;èvÖè=¨Á°±4“—Ž/Söû‡t΃ ìYch˜áÙžý ƒøAñði)/Y÷™÷%ʳ~ÎfØž}¿)¼ö¸[Êž¶2Ò¯Y{ÄÎí º~Hׄ®ZF_23QièêT¸µçr1Ÿû ö¤m;âÝ“#M{óÔll3‚ñ ÀüôÓOô¯ý+ÖØ: ´4dE¦+6„¨Û…€íQØ àrö+Ãð]H ûé§ŸªÛ Ãs0ÎYì!vMÁ¶h{zéîÛh|÷b†X1옦ô,¦él3z¶¦Š^­ôönAKû´ e}[ÐSlËÙžg{Ñ0€ÇöÉ'ÎwÃN8Ù¬¸/öã÷²½À¶œíY¶§Ø–±-æÏ\Ø»%ÍïÝŠæô.¦™½ŠiZ¯šÂ6©g c =} Ãö¯ª[Ö^y¬AkÞâ5¼z‘”Bvæjç0$î¤Ó‚gžuŽò@‡Ý8‹–1÷0C¾{Ž¥Y:YÅØÀçfÙ“¸F‚:6|Îð‡ž¦Ycß1 ߇i8Cwèè ZöÄÃt¿æœ±Ÿë!þ{8=€½]³ñ{Ò¹ÃùsôwïÁïå=`ËžxÈ-¯ÒõžcH/a(/yüA+D=œÆ©ýiÎãgŸãxÖÊιŸ3iÔ°?¸mˆÇÏ¡{°?î^ç÷{vÖ½î|ò½t¦jŸ³½çœBó&^OƒÝןv'‰M¢«ÿv¿f¢o>s¿)Oåù\,ªz˜·La%ó=ÜA±…°Å¶ ‘¼ž-ÂÈz²K!Œœ™³=Jyµ{ì{{´û:iÕÅ*ëS5]0ò}wЄþhÂÀ4™m ÛŒÝ:ÐìAh.Û|¶ElKÙg{rpzvp{znH{zqH;Ïp\~˜9&€vté¡ øúxq‹û/å÷±½0´=-g{fHzŠmÙîh Û"¶»w¤9l³ÙfêHSwëHSØ&²={Õ4hçí«[ Z¬à@“@Vô-Œâ‹?ÿ(Ø*HžM÷²×÷ØÒ•7:lô7Ïç\½ôüñœçs<‡çÆà9dI™c<Çpu=ÛYH[Æp~p¸ûZô°ç1T‘ÎlÝ< °Åßïtólþ›a;Ç8ÃÓ5Zݦ (ã‡ÓýðzÙ#nÐ`Ý8Ë qg6;ðЛg)Ø"ôÜ Á9t/BÏ÷Ã¥QÓùoÛ³z# áßuæ½1cÑÖ½Ü&3OuÿÆçœEw²G;aä`þPL×N|”Æ_Ë÷w¿ŽÆÏŸH×ìÞ€v»f‚·¢Ú[1Íí¬çm1_ØbE8­!y²]!CU¶—ÒYœGùdÎ(užó¥Ÿp:ð!êÇù£ÅÃ}Øq'!ãœcÇŸÌùÕOƤLòˆÈ¿OÆšäf϶û8'ÿÕmv¦—¾‡&ïÙ›¦ìÕ‡¦±Í`›ÍV±WošÏ¶pß^´h¿´”í ¶gèAϲ=ÇöÛ‹®>êßqá>ž}töã/Èïc{m¹kOíϟ϶”m1¾wþÞ}hÛì½ûÒL¶élSÙ–aGh׫[:¶CéyZg®Ö1Ó£Õ^-Gy°e ž– Û3îyŒ3pù¾sÔôryr^χÏãÇÎã…î-¯Š{R›~Œ_ïx²Ú ~:Û‡ø}x\ÁÖ¬÷·ãÙe€Þ4 ž¬c÷ŸË¿ç܇<Ø"Ç—²7«<Ýœ,*÷ Ïx¤øýj~xÙÎ<ï}Jæðòâ¥÷Ó™|ÿLÌû2lG!Œ|à µÈjò¨!Ô`È 4ÉÝà àNÉ ExÐî#';Í٣ݽÁ`ºfÂ'„|çéÜ6§ÓmJVÛ’Ô*gwkæpgó½NÔ^ó€KéNÈqåQ|ÌGŒPûŸ±@ ¦ô~Ûá\Ä@­FfØb52’¢rÄÑœÈâölU ÝS-¢P@Xîâ´!kÏÇjÀêäj!”Îiì.~±A‹û€­ Z Xó_ ÚªÂÖm]‡mu@[™B¡ƒ†o×^Á5¯¼W×-AU ‹ÜËAsºæ|®™þўϵ3QÍçš ¨Ì\Ë€-æl'MžÁ •ÎàLoئwª²#E¿åçü¸iÇœˆí|Èw–²ãNÂŽ†ñ)X#]ë¹ÊN<:vÒiçñš›‰4€³g=þijœiî|Ÿzæœ çε~aˆ]Ä;vúÙ;ãœ2Þf˜m=2•FvhF«&?HÓJ3Øf6Œ*ØæÎvÄ0ZÄöØ‘Ci)ÛlO3”ž=f½pÌîô±»Ó ¶—ÙVº·øÛ´•Çñ}¶—Ü[ü Ó¿ÈÖ;DÙSüùO5”–±-f[tä0ZÀ6ÿˆ=¨¢tš ;jzþž[iX“]*[s~Öôlí•Ǧk‡™õÞÚø"ä ˜³luù4þ°Óïv¶ú<¦<@„v*€%`«oêOxé°±†ªë騩.ž5z˜¨02«»sJ:€ÕQìI k/|2·ïØs²aõ­2NYg^Î4@׆¬¾àúnz·AÛ€‚’]íà ›Ë òr£ `7É*åÊìÅÕ°¬l"Œ$û{õqÄ.¦ra‹ðruC̺òPVV*-«ÁU äé†:À|®žÓ5QÅUÒÅ tƒ•¯¼Jºtåõ<^Ðx™ Êá| @rbšÓÎ÷ìÔ3xwi ½S¿³±6õs.æ<éh_ªì,mÃ/ãH×e¼‹d: ¶/Wz¿¯– ¿Û¯äŠc#‚í"~ܵóË0]åØcQ¦ß&MM#»•Ðë3'ÓÜ“£ylóO•Ò¢S£ÇØ–²=~ú¡ôô™‡Ò³lÏŸy0½pöÁôÒ9Ó˰s¦W\{•om8¿†íUËðØJ¿ïEþ,ØógJËÙža{âŒCiÙi‡Ñbלz8Á ?™–k×JÁ9èí³µAë‡møÊcäåÅ< Î…=]XŒ‡X 3q2oû™ænû©¸CÁäô»°í‡W#c› æ]yNÔYå„}šÊåçÎÃßOó¨ ºI=ÇÞ'ƒt&@ªá 
¡àa¼:™ÃÂ÷±—ê@f8 Uyz žé†‚gަ!ðÌ®já“{ßY億ϺO/‚rW'³×zïÙüž³1ËÛ•¦ßHƒùuðXñ;”'«à Ggàw²¿`¯FÌÞ*‡‰ç3`çOÅÞ*¿ï:ž‹U{xïVm2è:Úü»èTþûÔ;œ=»\G»5ØFŒw@ÝvÃö4ºeöxºrÐhЈ‡½-?zîäiÓU–*$·ÐÛ~B¾…³I¡BÈXU®ækyqÔYÃ1BÎ!¡Ó9”Ä!ä#ŽUé÷äEC8 #¼Z\$PîÎ\üXæ.áÙš‚¬>6ÃÆ  É…BZ´BYï¿­lH¹6a«çq«;Ÿ[éÌTls]œ'5ïÝ 4f…!{»Pü|nøV!³œŸéå"‚µÏþ¼ðç„y-Ætî¿×ÓYç]ÎÀ¼Ô3@Áιé+3Æ`<—á7ܵó|Ê|ç+øa±$Û%×*›:cí¹ÏÁôÜ +y?þuTvùõYv1ƒ¶K®E¶]:âÒvÿíÙU7rÁ”ŒÍ¬X@7öåT”ófÒ¢óN¦EçŸL"-½àDzœíÉ‹Nâ}·'ñÞÖ“è…‹Oâ=·'ÒË—žH«.;‘^½üeo\~½iÙ[|?Ë®àÇì5~ìµ+N Wø³V^Οɉ¼¯v-çïÄw?uáI´ vñiôäˆ èe.”ðÐͼ³ãýUò]öT70©Eh5lõ*csÕ±¹òž.âȃŒ\½wÜ5Ž7I»[ø‚„*k” %ß®`Ï Ûb¦EÓnbð¡Ù¡£i:öÜ«dõ=w3Mã9gÞôåqªçÜç$Øžz#oñ9÷•x;³Ÿûxµ¹O¿‰A9”ó–g‘’æU^©'oëQɾÇe@ÊàÉ “ô{à5ïÎ[zœyWxì Sµ¢ØõPïrVÏ»óLwîõLº½ªñ›O½ƒf*oµœNáÏ9ù6cKÏC×ÐÀƒèŠœ=´ÓÆžÂï;…FO€.Ø€\ñ€j[JÒÙ_˫٫Ŗd‘R«9£Z\Ïu‰Q «‘» -”W{ª³ ™£àÅ_aÅ$VQb{@«kÊš{eò=%¢¨Ê¢§¨ùØÌ¼l°[%к^N˜§k¯B¶H%õlmàjO×®kg› ªåÝV&åc\è8-à†>ˆôvC<ݨUÌæ¼ne¶ UÖÓ +ë´ˆJ×ÐM²?×^µŒ…ŒÈ]Œb¥G©¦•–<þ4à—ò€~);/Ë[ {œ¯­Ž-Zò„ß–>ÉÓ\ڞ⿟âi0Ç–,{Ú1þ\ØA¼ðjëóµøÇžx†§ÖžõÛ“|ߵǟ\ÎŽQ€=ŹöÄSÏñúמæ[×nß½mzå%zsÆz›íл3{íý™hÍlÇ6̯lÛf×¶ðmmådzl.?`[ø±Íl]Ãw¬­˜@kgñ÷²­Ö6w-Ÿ6Ƽ†ëÖQÕ ÆöJ [íáfÁ^Nl‘a(hN6hŽ5i¢¼å6'²JjÁ‹rJ†w;€LGH¼ ænuö(^V®>M%}ûSo¾5 …Yß~¨RÖ 'ÏHg´oN‡·-¢ÃÛð-ÛmÙÚÑQíZÐQí[ÒÑZÒ1ZÑ1]ŠéØ.%t\W¶nméøîméÄîíèÄí]ë@'õèÀÖ3ó÷ñüø xÎz ÇŸq\÷ötl·vtL×¶t4ÏQlGv*¦#:¶¦Ò­è°ö-”Õ³=bZþü \4þnh5tÁVƒ6(rlá1¡ä.ê'›y‘½¹[7œ¬’[0`œ=·\q‚a¥S6šéãR.: H»È£¿y0·’…Þ§êK»È 0zÍD7¡„——Ûj@šæesòK8©ͼɘCÅ^W/?2IC£v\HâÂÅQÛwªëÉV²qÞ®^¡,°m¢Ò=Á6ÍÐrä¼® [7 ºÕñt3íàj òtÃc˜†ô"ª°âõðtÑ·Z¶ó-‡%Ä0kçÚ¨€F—ð³ ØåûT­\£^îQœz†ª_0$9ö¤Syõòiʰž†Ê`0ìLJ! ®#¦rÆÙªl' {÷mCòm˜Â2íÌsÏçh[°am‰iH1k*˜%1$ð± ÓiAˆVÖLè†z¶&hMàF•ÓC¢«¹€ü5#¹–-Wy¸7ߢL¡®êmåwó®®þÃó¸ xdðÎÀ`¾PdÙeóbËä™eóŒryÙ%ò0·i™b¦4žY&ÏG³<ƒÒñ0Ù(¹Ò§ETæÖÅõÕÀu=N]O“çRaH®êØ24501ÇZÎ…çafÝZU¯VšGqymå*|ïÔ¯½EE°âZ^u‚PÝÅhXoñ9å ^îªSNi‘ø…á÷Ø™¢xAƒV­×^µTà@/ 2KøÙusÍ Tfa»’ ݪе‹ûºAÀÅcaЭpߪN¸° mØcj~÷*Ç®¼z¤Jû§{íõ7©‹ý 7UÉVËó¸¨8s;Ã@¹[•‡ârP¨ú  È£.cp!øì¯Z˜e Àgc̹b`ƒh¶ï¨úÂnýáËù|àœ]Ê‹ŸP>  .Æ<³’ÛQ^.<\ÓËÕn—[ÈÀ„«âáfÁð¬ l/q­[¸€îU®—{ ]„–‘DåG3\à•</¶£¾N(垜Ш‚™öø<ϯ 7 ;ÜÂ3ÔVÎ+£m»=m|¿2ÎÿiÚm|lÀn¹Ý1GYtA ϰTÀäß ÅÀR“s–0 H`&V;Àt¡éž ¬†]zåÕj‹¬ì²lWò2ü+”§ŠÕ‰( Âk=—‚µ®§òÂ'U^Õ©åïÇä…gOÖ-§ ë&­PÛ{´½8¬Õ«!‹ CÈjoÖž“õÂÅ)A¶²žkÜëí9Û$àÍÅ®#h…²éÝÆy¹q ’T 2Aä‘F…–ÓžÓÍš×Ý¥)íÀ¦CËq!æªC·˜uHÝ ð²™ÊLŠ6Ÿ›$´UÜ@ïËÕÙ§]„•aa^n®ÃÊQ^nU<\x¿u!¬lÁö µh&1l´€­ Ü,O—½.x^ðÄ­häΜ.¼6„;'x½Ø.ˆ•áoÃð|˜á3´aŽR ¥W'Ä0C˺As¹I¼\3¬l{¹Gò¢I3´¬½Ü¸yÜ °2`[aå$!e¼&Wó¸ÿøìsÚ¦a#jðÇmÒç_$„­Y Û [ ] „8¯ºŽ=^/¼8 _ /x|×ks=@<æ™BÕ¡Tó ´ioÑñu¨Õ‹ï¿z$8žŒañ¶?9[ 2æ„e¯S°¼ °d/Óó4˜XÕ{±ámÂãD’E çl"ð8•]À^'àÉ+€ag²÷ ;œÆE-YPÀ“@°€çq O¬@!vä-D‘Rñp^à˜¢“ Ó ,ŒÅvb¸îÃpÅêbTîÑ€EŽc¬4Öó²ðd{ôãm=.dmІAÖ^üäÛÊÃéK\«N¨8Î+­Êóq«“x¶Y¯I0‡W)ÈN„V„>l×Þ“[EékÚÓõy»|ÓòvƒÃËÙž®^D¥“cØeýª2ŸknJ²€*,†®&×,l`{¹¸Ž$®^¬VNÛg–?¯öhyûµô¾-÷y:“š³ïË4ÿçêÏQßñ8–¨ï|™÷˜e[fÚË+ùoðOí¥—[áÙJþ{%½øìezAÛ þ{ÅK޽ø={a…²ç^xQÙrop~ö¹Œ¡aO/Žž~Ö±§žY®ìɧŸUžpíq®l¤ÒRº…T-.| « !C–*r  Ús‚•Ð#S¸^«Wæd±r’møëáêíU*ϲQŒ^m‘2s-»E ô¶(sï0êå¯&ŸajµyÀÖ)ßjsÎ2†LciV¹×ˆ«à‘¼¥ª†½Öq¦·”9·Ó²WÓs’Ô2¶m¯¨¯Œa{šß&ó}¿©ýßqÆ‹ÇGYÜû+ù¼}Œê¾»›@߆¶Ãdn#6_ÛMá¶ô™ÓææyÐçÌwÞ íAÏzŸ=ú€ê'³`¼/6Gm=DC?DT6—·Er?…!}«®i­ûµ“­.“c]å)à¤ó_—ó}e|½|îy¾~Úæ^Sk+®±{þE¾î&4ïš­¯Ý o_äë}–1 ÀÛV0+çkvЪXØ¢®y=5nZ$&m  ˆD¢ Œ¸êš@àÆÂ$@µ¿VžË””¶¶ ˆDuSÿçÿüúòË/éÛo¿¥Ÿ~ú‰~þùgŸýòË/ê>nMûõ×_é·ß~£~øvmÒ\y¶w [žëØÖMÑHg—ó& ˆ*§4ak/œ ‡­»J`[¹“%â–ö ˆDuSiÃÖn6l±ÅÅXq,°­›¢‘Î.çM4  TN¹€­Nû˜[lÏÁV ܼ„mi9­X±Â±Š2™OFáûÊX÷2ª(/¥ÜŽeÝ+÷ÞÊ|¼VÚV4 ¨C¨UØb{M^-(VTPYw0¥å+¨¼4lº—U(@'}}¥V‡ÕÀ…-Ú£&Ûçj…qîìö{Þ|=^[ ZƒÐaUŽ?éûëjûDõ9é¿É®s{ݪklZ°Å6#»A¨g«TÔlË*Ê©4¤aÑá|©Jzg5˜¨ãÏ‹Ž€È{¶5Ñöï-«È ”‚Ú"îù$íWÛíŸäû³t\É IußÕŽIŽ?ÉyÈÅkjC³¹øò™ù9pˆƒí¿ÿýoúÏþC¸Z Øê´‘addX ƒmY…¾ð2£l?õkœPo68CŸW^«6nMï«´\_¬K©Ü}†¯ó¹ü|™f¶<)ÕYËÌï±ŽÏ Q[Þ‡7².7?ßx‚ãG' üýú½ #çwñçzÇb·¡þíგ°Îìx–U"{þ#ÚÏùí|~JÍö÷Ã7îùHÏ.ªý+Ѿæù)/uÛÚh#_òãÞ!áùG;T–aïOâùúôÇç£Bÿ¶J(,ìÏ0µe<§ú´§¿BÛ×m·œõ_5àÉ\WÌk‰7Àú}î€)´}cT±×¼?¬%Õw‚ã¯Ï(Øbk@Û«W¯,àÚ[LØêr}~Ïöó/¼|¿:¥a g«O¸îH|_B³=Os^5îyç‚ [/|ly¶ŽÐmfî;‚ÎtîlO¹ÔçUãóLØ;ï÷žŒ:þÈßïv„Ì…ÈùžìÑ|lís£>3{@cÎ \*9ÊŒ8ÿ 
NfTÂn?o@äN¨¶q|qÏë A¬BÛ?AûúÏOh1ßí^8m=Åé7Éñ'¹ØÅÁ: Ʀ^جÁVu<[;2¡Ú' ¸nÿ3χ۞êx"Ú7×ý×§W÷øÌöŠû}v»µo|¨<âúÕ¿éÛ?°É:?•Œ°$Ñj]zMl5hKJJh·Ýv£ÓN;ÍÜ8ظ>Ø~[N{˜5gºÕ o¬æ]Ôãž÷F†ñ°õD[{Ò„U¸ìß0º·aëûü€¶ˆ¼Ø-B_û}ÑI:œÎ”ñp²Ã¶ú‚P؆Î7Æ´Ÿ _í噃µ¨ó—V‘íÕ¾8vÛËÇÀÂ{ŒÛ7A ¬â`wŠ{øó¦çyª|ÄDoˆgœÝò^gk3¾}sÚο¯ý¾øöƒmäõ%ª™ÇïNùúW¢ã¯äÀ»Àà[ ÚvíÚÑ¡‡J£F¢qã¸tëwxÀõÁ¶is•Ê×®ôÏ>ËTý±a‹äü+󶾎m\ƒæü’Ã6+ó¢^w`×Qì AfJ îâî{>tž<¾ýâÎOÜó5[ß8ùîÙfgß`ÂÑO’ÁB°^’µ õ¬âß ÛxýE¾?d°•XÆ_’ö­:lc~_l"O0X¬Ôµ À¡ãN8FŽI3fÌ eË–ÑsÏ=G«V­¢åË—+àÁõÄMàÂöâË商­ž“Ó!B= Ãlø.¸*d¶5Å¡Âa±Ju¶€0WìÈÚnëø³abüþØÎ¢aZõ9[s¤[å‹jTd#`ŽÎ¼ fŸ 0rÖgd{ZÑadÃc2Û?AûÚçÇf¯òuÎ…ÿ7†|¿qaŠóLã.xqï{>K†¤¿3ªÿÅWÜ*hÿ4ŒÓvvXÛ5ñ·o®û¯Ý>ö´OÜïKÒ¾U†­=°¯O ôwüqç·ÐŸòlÕ Ãb©0ØêJpNÙòlu©9†Ê9™0r°g䯄ƒBMYÛO|‹¬ mÀÅپЕó"*sЪ{q4¾'è5˜ÛÕŸ…ïHò~%ÆØã÷/PRÇo¶›»Ö^ˆ–ùÕƒ­W¶ñçßl#»ýôÖÿùñoã2ßãœC3ÔüýYaÊ öOÚ¾¶~y1œí ø¸ùA}þ¨·÷þ¸çíÅyÁaäHý&ðdìöÑç'iÿ kß$ïÒ_’÷Û×§ , ³ö¢‡ý>'*`/@L’Or|¡¿Ï\ôsý¨Î1ÖGØšù‘åFvÃÈðlÍ‚õl?gÀrtÃü° SÖÌó>ÁYs il)tA9°•R²ó,픬j¦ï×ô±ÄEjúxäûr«³¸­?I`Û¸2°-»ìJÒöòªWò+©EÄÈÚ¿ä^2KIǬ^Ç´· H{V¯=ëJûÉu¤~œç = l„®êJG–㬿Yνœ{Ñ@~k fa{ÙÏ«…wûòʺãÙŠó[Èr~êðù Ý6¢ç(“ÏMŠê° Üñ©Qتðñ¥Nù¢K¯ —V®ª3adéÄÒ‰E¢Ñ€h ªH ¶Ï¿ø}ÞE ¤>û\VÙ%Ž lE¸U®¼O´# Ô% ¤ ۳ϻÐnÖjdíÑ*ï–MÂÈÒYêRg‘c½ŠDUÕ@Z°E)sëOÀ>ÛϽð±·Yæl¥eÏÓTµcÊûä¢.(, ¤ Ûs/¸8"ƒ”F¾CÈ0_Ùª8TÑG‹/×™JrýùµÑ‰ôþᚬ1[¿S¾³°.Pr>å|ŠR…­›ÔB§lô…‘Q•àÂK.§ .¾LÝ‚æl«œÀ>¡‡TùÌF…#öäE ç7JG•ß!š Ôm ¤ ÛsÏ¿ˆ”e¥kô`뀶*°5³;…U†©N=ͨÏ÷<ðz³IêUÆ ì-R¯3»RN\Êó25! ä¡Ò„íð¬0òçFÕ7Œ È^tiÕ`«GvaiÎìÇ«ZO3êó£ ÄÕ«Œ™ÆÕ³tr«J½Î¸v”çë¶ çOÎ_!j -ؾ¸âel‡_˜1_=[3Œ|ÁÅ'W=ŒžS4¾Þc’0rlCëAÆÕ«Œi%­)õ:eÔ§%y^4"È; ¤ [=_ èfÁö‚²Ëèü²KÕ¼-lÅËÙI-ì9Û yÆÄ ¼«XO3-Ø¢ÂIh!ô¬Î0)½±LêuŠPˆ€ü&Ñu!j W°uæl0²òl±8Š«nÙ’,ª l“Ô{LRO³J° ¨·k׫ŒPÜ*h©×™ä"W"°ºÏ'9yMœÖåyÑH}Ó@Z°]ñÒËtþE—Ðy]ìY`ùBk‹Þú“©Ýèxp¹®§ýùIêA&©W'0©×iÖì¬JNÜêÂ4îýr‘ŒÓ°‚=ZÔ²u¬ÐçlE•¤´™´™h@4PˆH ¶/qÙ)UËb•$êŠ`Øf<[Àö #K9ï6ŸbG—ß$ Ô®RƒíÊU™Úðnø@ÏV÷R϶vO¾t>iÑ€h@4P3H¶Š£ ZÍS ¶_Ð¥W\Eó‹.áp2lå*ñlEè5#tigigÑ€h 65lᤂŸ`©æéçþ9[†í•WÑ%WŒP·°•¯¼šÇad7¹YyÇ ùÆezªÍ*ß-Ñ€h@4H ¶pR5KÁSØç_ ¤pçò«®¡ËF\M—¸FY~ÃÖÍZÛÚr’B µy|òÝù×ÉåœÈ9 Ô¾Rƒ-;©à'Xª- ¶W\u­ó¢+ñ¢ØIöW¬X‘Iâ¯+ÝTTP9?®ÊÜy¯õ§ô3“*”—†{§â³*ï¨÷°«§«¿Å2¯5Ž/ª^­á5ã·+ãïör9‡Tò#¨}AI§–s  ˆ²5:l™£Êqe'öó/¾ÌlýÁ׌$WÛ*;ŒÌp+5VçÕ‡UpQ u f*ð'þ¯$hUîe·V¬Y>À³LC¨ú}|_1®^­ª¿[^ê­Îu€mS†oUòKç—Î/ ˆjSiÁܼòêë<Žâï/²a{=]]ÍÀe[õêkþ9ÛïÍóÜÌz±š %¶õd”“†ƒÊá}¦ øÐÒyaeõbëÕ&+±'°• Fm^0ä»E¢ªi 5Ø27•ãÊ hl¿4<[÷ªëFÑ•ü¢׎T·¯¼úº[Û³ô{­ª¢Žá-&‚m,Å’kØÂsŽÜóÎ ØVMèrv ˆjSiÁöÀ–ªíªk¯·`Ëä½zä ¸Ú^}í lm0ºž`bÏ6 ž¬ Ëf.¬„÷‡hƒßï *íÙòñÅ­bÎ~Þ9Vs^6I=ÞÚ”|·\ÐD¢Ñ@îæl_}íuºÚå(na_~ùUfÎö ¾síõ7)à^3òFe¯¾þ¦/Œl.@Â\ey9æ,6eìÕêEC»ó¢æB$$;T[Îó¥‰a«ç‚zªx¿»P)¾žnp=\{SX½Z-Nûù¬P¾Ed2+Z.ì¢Ñ@]Ð@Zží«¯¿Á,u Ãß>ØâÎÈFÓu£nöìµ7ü°M¿Á’…fÓÿ^¿´©h@4  d4l_cØŽ¼!ÃQ0õ˯ ÏwFÝ4–FÞ8Ú³×s[6¶ÐÈI—Ž/ ˆDµ¥´` n^opôúÇ0l¿Î„‘qçÆÑciÔÍl7Qöú›oåq)em‰R¾W´' šÒ„í š£¸eûÊ„-îÜ4æ®cxño¾-°•ª?RõG4  ¼Ò‚íì¤ÂqÕ,ÅíW_“ñl¿úúk=ö6ºIÛ˜[éÍ·ÞØJ'+øNVh#tù=âuŠ*¯Ô`ûÖÛÌÑ[=–ÞÌLµ`û ¹¥œF+»]Ù›o lE´•­´™´™h@4P×4lÁMÍP‡§åôõ7†gû5»¹·Ü~'½íÏÞ~ç]ñlųÏV4  ¼Ò‚í[ÌM“£àê×ß|› #ƒ¼·–ßåÚ|{'½ýÎj­t²‚ïdum.Ç+^£h } ¤[8©KÁQ‡©ßøaû-Ý~ÇÝÊnsíwë'lã2I‰ÐÓº´©´©h@4P›H ¶à¦æ¨fê7ßž-È{ÇÝã莻îUVÎöîê÷óγ•ÜÃÒ!k³CÊw‹þD…©´`ûîê÷<Ž*ž2W¿ùö»Lä½ëžû”ÝyC—íÝ÷,ØÖf=Û„õbýé3éǹ^Êÿé„þòxqõpÑÉìdvY>鈅Ùå¼Êy ¶Rƒí{ï)†Â4S¿5a‹;÷Ü÷€²»Çݯlõûküžm­Ö³uNt”g믗ëæR6r/;Àµ‹Ågç/+d¤ž­tÈÂîr~åüŠ SiÁv5;©÷Œs8ª™úíw†g‹;÷=ø°²q®åXÄÛ ÔŽÒ‚íæ-[<†VÌÏLOÿüçOØâÎüGѼ iî‚GiîüGië l¶[Ñ€h@4PðH ¶[¶~ ø© LýçO&lùΣ‹£ù 9ÆàÝú¡ÀVF™µ3Ê”v—v ˆjRiÁvë(~j[°p1ýôÓ¿2ž-î,Z²Ô±Ç–ÒÂÅKèÃ>ÏVF´?¢­É-ß% ä§Ò‚í~D2?ÁP°ö¯°Å¥Ëž %l-}œ[ö8}ôñß ¶ÎV£ìtZøqÏ›$_ëÝ&©Z$=?;ºœ9/¢ÚÕ@Z°…“ºxé2æ(ìqÅÔýëçŒgû¯Ÿ¦ÇŸ|ŠíiZÆ·KŸx’>þÛ'[9.©EÜóI:C>ÔÛ K7™äøå5µÛá¥ý¥ýEµ£´`û1;©K‚–1C—=¦>E?3_·i؈üq›†êÎSÏùäïV‰=³ì òRê4‰T®jÅr¾b/­¢¿„™\¢œKö©×'L2¡ß‹ïÍxqÆç‡ÕÛõаg[ÊÕBêÙ*ØF<é9FÕÛ­¡öÑ4 ¶aõ~÷š)+3µýù k§#ÈHÚ]4 È¥Ò‚íߨIuú4=É·O2Sþå¶|çÙçž§g–ÞSö÷O?Í»z¶^nd hly©+¨z»^^c#”¬ P`Þ-É×Àz>̲ëãª÷¹ÀUÇjP0KüùëñVn bŠ0 ¶Iêýzm鯀V.p¹¼ÀÉg‹¾òEiÁö“¿JÏ<û=ýìru ¦þò˯Øþ°}î…iùó/({ö¹èÓ|æ‡mÔ³¨B[ˆ 
ºž-`äÁÆõ†“×»u:Md½]c€°Â­½ë}>ŽÝöðå„^,lãêý|¿„£åB˜/B9Ñb®5lá¤ÂqCaàé/¿š°å;/¾ô²²V¼¤ìŸ}žwõlÃa OпÊeМ¬ß³¬N½ÛÀ6À VGB² °Ø$8Ž\wù|¹‹Dii -ØÂI}þÅÊ4Kýõ·Œgû+Ãö啯ÐK/¯rlå*úìó/ò®žm(l0™a[oÎÖš #›žd•ÂÈ!õvÍãƒÇj{¶ ¸Ô é,ÏVÏ«†„«ùsÂÃÈqõ~ýÏg…°² J4 (P ¤[8©+^ZI+^v \ýõ7¶|gå+¯*{yÕ+Ê>ÿâË<ªg\oÖ ûš ˜°ÍG×®ÅkôÖ=Û \âž÷/2kÚZÐ ª·k†·Y÷5æB/õ;ì0x9ÏG'†m‚z¼1avûû+xÁ˜ÌÙŠç–ç Ÿ#ZÊg ¤ÛÏ>ÿ\9«/1dÁÑ•l¿™°ÅW_{ƒ^yíuzåUØkôÅ—~ئßP5[Ï6ýãÏuç©Ýö‘9Û\Ÿ_ùüº×'åœê9K ¶Ÿñ­b~ÂK™©¿ýûß™0òo¿ý›^ã-zí7éµ×ߤW_ƒ¾üê«œí³­éz¶uM µÕ>R÷W.¦u­¯ÈñŠfÓÐ@Z°ýâ˯”ã †ÂÀÔû`ËwÞxëmÇÞ|‹^gûꫯsÛ4G>C:™h@4  ¤¡´` 'Ž+ –Âþýïß3ž-Èûö;ïÒ[oÃÞ¡7Ù¾þúm.HCœòr‘ ˆ EiÁNê›ì´*cŽ‚©ÿþÝÛßéÝÕïÑ;°wWÓÛlß|#°-!Éï‹¢h@4 ×@j°e'Ž«2æ(xú»¶|°}÷½÷=è~óí·âÙŠg+[D¢Ñ@Ák -Ø~ÍNêÛï0d]ç\ýý÷ÿdÂÈ ï{ï¯ñl5C÷›o¿ØJ'+øN&£}ñøD¢´`ûÍ7ß:N«k`éïÿ±`ûþ𵤠àýö;­tB鄢рh ð5l9"¼Úp\ßc®þÇ[¾³výZ»n½²5lß}ÿ}^z¶¾m1\ÁÇÎg,£ð;†œc9Ç¢Ñ@šH ¶ßrDxÍÚuc–ú`«’Z¸{‚^ã[lÄý2çI-ª –*æ Nó¤D}V>Ô³­©ß*ßSýÊ´„LKˆòRiÁösNs¬Z0Cu¢(_R d‹ºúºhĵ#銫®¥ËÙð†ÆM‹2 V/¶†êµÚí »nD=[ʱœS ª÷麻–ðÃê½zïwËùécɪéë}¶óâu dP" ä¿Ò‚íÊU¯Ò¥W^M—ÁF\«x ¾zÅã?øð#ÚcßiÈ^ûÑî{ì£lÁÂÅùWÏ6ʳ©gë3“ËØNGWï5«Ü^@¹?ñló¿SÉ…OΑh@4`k -ØÎ[° Ù“²í6t/fé¾´õƒ3°ÅÁ{ì¿û0ê7h(Íå7ù<Û|¨gÛ˜Dû‘°Œ+AçVÓñyªÛ¼ É…T.¤¢Ñ@e5lçÌ[@½쮬ïnCO·lýÀÛAÃö¦ƒ÷P í3pÍÿhþÕ³ …m|=[­tÀÊv@y½hF4P?4lçSÏ~»Q¯þƒ<àfÁV{µ qïƒiÎ<¶qõbkª^klãŽ/gš]\Þ_uÇvv½h»ï3ÔrxÝYéÄõ£Ëy–ó,È ¤Ûйó©Gßpû ì÷lA^íÕ´½úïΰ]Gõlñ’»ÈÉ-Â1GÕ³5ŸS¡`c1UÔ"'_ØØ¦æZ¸eÎb+pƒêÙÊêC 7‹D¢¼Ö@Z°=guï3@™öp·lÝš #¶ˆ-#| Øöì7ˆ*,ئ?:«Ýz­éÿžü½Éo–s$ ˆ²5lg1l»öêGÝz÷÷€»y‹[ÌÕjÐöà˜3Üaß©GfµU¯UD&Ñ€h@4 ÈÕjäYs©KÏ~Ê\„”a‹ð1@Û]àÙsçå ¶"v»h@4  ä‹ÒòlgΞCº÷¡Î=ú*à"œìƒ-îh¯ íÊDFì9Wžm¾4°‡tvÑ€h@4 H¶»õ&˜î¦Í[2s¶¶=ú:^m—ž}¶)†Í¥3Kg ˆDù«´`;cÖjߥ§‚-<\x·Y°Å¢¨î_î“» 2bÏâÙæ¯8¤ãʹ ˆDéh -ØNŸUAí:÷ð7¶Ê«Øæõué\ét.iGiGÑ€h@k MضíÜÚuÉ7 ¶ðj1WÛ™CÈpgÎÏV:£tFÑ€h@4PøH ¶3gS›NÝÈîÆM›3s¶›¶lqæjÙ«h;tíŰ“7ad»êNîÅôœ8£¢L¼\™» ˆD®t`[DÓf΢â]¨¤cW¸~Øòj)åÕò\-&v1Á;#`«3DÕlÉ:Iº‘ûAMᘥ å‹ò_©ÁvÆ,jݾ3•tèJm:ÂÃíAY°íÒË-¼ZXlk¹ž­*$PVFaõhíª?¶WQïVuëýå\²¯ÜüŒ˜Ï×é$‘¾1“ÒŸÙ—r²¢‚*Äs¡À=MþƒFÎQJ¶ÍZд³Ù³íên¶g˰…W Ðb5ÕŒÙyUÏÖç$îîˆ7»mæ9ý¼/ YïÖ®”F¶ ¨ã º~œ¯½qûxU-­ÀV`+ ÔºÒ€m“æ­l[·ïê·‹š¿5`ÛHíêÒƒçk»õQ Åä.–0çS=ÛØyžÇë)P÷ Ï2ªÞm@mZåéj¼E²ªú}Ž×‘\€‡yæÒáj½ÃÉ_¼0Ñ@ýÔ@:°-æ9Û9 Ûî.p»©pò†M›ô)¶j¾¶ko[Ðxú¬ÙyUÏ6¶ âæWcêÝÆÁ6öó]FÂÖ1ÂÚâÙ `e% ÔºÒ€m³miúÌyìÕöfØöàÛîlÝhÃF Û†l± ¹}çž.l-Ï6®^l Ô³†mBXÖ®/ëRãŽ0µjÏÚaÞÈÏ×%¶Yõr[£D Œ¨ëçˆZλœwÑ@ík Ø6oÙÔ Ø~Tܾ—\¶Û4Ü–6mÚ¢¼Z¬BnÛ©»ZE5÷ ™aäÐz±æ¢%ÀÃ]ˆd.Ró–v(¶œç3³<;jõ/,JT–—]ó6ó9Qõn•Ø­T+p|ÖöŸðÏ·CÄN(Û\=ulÒÙj¿³É9s ¨¿H¶E­:1lò¶Ÿ Û> ]·»ëÙnK láÙò|-` Ж°ë;Í‚múBŒ ýÖߟ~[K[J›ŠD¢0 ¤ÛÖ]¶‹¨¸Óî [önÝp²ãÙº°Ýèz¶XÐb5VUå*7²Ô³ÑË…O4  ä‹Ò€m‹Ö]ØvÜÚZµsBÉY°U!d Ûv]r Û|i`9éì¢Ñ€h@4&l[wäÀ¶mOjÙ¦­÷{¶›2Ï×¶fЦr&Œ\y¶"n·h@4  ä‹Ò€mQëÎ4mÖ£Ôªý†lo¶îÔ¢„a»Á #7R›nQ¥ MG^ªÌ!äVí: le9~­/ÇÏ—Ž(Ç!P ¶Ò€mó–™›ó²}²=¨¨¸+[— lÿ¸[äpÄ|m+öj[¶íDS§‹g+¬°;˜œ_9¿¢Ñ4l›µlÇÜœKÍ‹{PóV]¨¯NnÖº#Ãvc&©Å†›•WëÀ¶3µhÄØŠg'Þ½h@4 ¨H¶M‹ÚДi³©YK†,{¹ÍZu ¦ àuë}°Ý¤`Ûº}Z¶3eζˆLFö2² ˆê»Ò€m“æ­iòÔ™Ô¤E{jÜ¢ ß¶¥¦|»ný†L=[,MÖ°EYÂÈÒùê{ç“ß/}@4P4 l›µ¤IS¦+ÐîÚ¼XYã¢âlØ¢,BÈ ¶âÙJèH¼zÑ€h@4PO4ls‰½‰“§Ñ.\ýGY³V ÜVÙ°EYÁaä’4Å#ëT‡åœÂЩvPñ&+]b&U¢™ÄBå/6S#åè2Ÿ‘縞œ|UןQµœk9×¢ÚÕ@*°mZD&M¥š´¤›¶dØò-ÛÚõë3adlºU°e¯ -²` !8Àõç6ÿ#÷¯¯€UÕÆ®çª€k&â¬7[»'B:‚´¿h@4 (\ ¤ Û¿6nAmÒ‚vjÚ‚aÛ‚Ö® ­òlÛtÈZ [O6 ‚Mi¹YÐÝ_Y'« NT½Yñf%œ% ˆD9Ò@:°mΞíúkã"†mQlu™A›Ø:Þ±ª„“U^.¦ÞlŽXFª…;R•s+çV4 Hª´aû—ÆÍ=àf{¶jq”ãÕÂÌ9[F6KƘþ0²éÅâ$Uõq ôjÍr{®—ëû>®ŒjE¢Ñ€h Èl5pý°å ­ÚaË[{Î6Q=Ù„a`õY!çØz³9hणyŒE¢Ñ@áj MØþe׿Ðj«lEd…+29·rnE¢ú®´`ûÈÄÉ”¶ðjƒV#×÷“!¿_.H¢Ñ€h 056lwܵY´g«S5 l SPr¡ó* ˆ²5&lwÜ¥)s›FFæ(ÀV<[éŒÒE¢Ñ@}Ò@Ú°Ýaç¦ ¶°@ØjÐ6/nŸµ¹>5¼üV¹ÐˆD¢ú£4a Ð*ÛÅîšuëŒ R¼Ùôj¶õGdrA‘s- Ôw ä ¶n(lZ­t¾úÞùä÷K Ô ¤ Û?ïÔÄñnÙÖ¬ ðl5h¶õGdrA‘s- Ôw 䶸°Åœ­x¶Òñê{Ç“ß/}@4P¿4+ظ[ÉH%ißD¢Ñ€h€5 °•Ž A4  ˆr¬mŽXBEõ+T$ç[ηh@4¤Z­$µ1ÊI4  Ô' lų•ð‘h@4  äX5 [I×(#Ùú4’•ß*z ˆ´Ò‚íø “ÕÞZ¬BÖ–µÙ„­ì³Ê…H4  Ô ¤ [´[¶Ò±êKÇ’ß)Z ˆL Ô8l%©…P.B¢Ñ€h ¾i mØþé¯ êÙê•ÈRÏV:[}ëlò{Eó¢ú«…­YõG`[E'9÷¢Ñ@}Ó@®` ï64]£ì³•ŽVß:šü^Ѽh ~k@`›ã½UÒÁêw“ó/ç_4 €j¶²ÏVD'Ñ€h@4P5 °ÏV2LjD¢Ñ@Ž5 °Íq'Áu/« +VPy©Œz“¶™¼N´" Ô 
lS†mYE9•Vñ3\­\<êÊÅCŽS´*H®Z-¶Á¦NŸE›eÂ¥åÊ»ÓVQÖÝy®{Uàñ †‘zžæ½Ö·²ŠÌûËKK×W”E‡H’~~Øñ™Çh§ZZž9>Wÿ¦²îÎ S°-s«þU·t‚ä@ÚJÚJ4 ȵò ¶¥>¯àô<=ˆê¾‚žYÓÄë=@7HZ ³Ÿß€ámz­¾ãs?'ʳU -/õÀý°µïg~t†\wù|јh@4+ älµ‡ix‡>Øj°u¡åÁïµ=X¼.Ϋ5aõù!Þ«ö ‡-Ã?æX²ÂÈ|ü[éü¹êüò¹¢-Ñ@Íi -Ø>21SõG§l ­úF†'šñò¼°ª^0dÂ4)lƒ–û|å)G_¬g+°•Ž]s[ÚZÚZ4_ÈØÚ`4úګŒñ<ÍùOM…m³¼I7¼ì†¡=AÆÁ6îø<Ø@6ÂÝúxüžªs,Ú;Ï6¿:‡\¬ä|ˆDii `ëοfHñb¡rw;Œ¹hácw¡ÀåÛ2c‡¡ËËB·°5ßñùú»œc4ŽÏÜ®ã[D•½2Ù\Àe. 2?;3/í,¦’P²tø´:¼|ŽhI4P;È+ئ/‚øÐmúßY;'R~‡´»h@4 È_ 䶺ˆ|%çlÓm${‹ˆ0Ýö•ö”ö ˆDÉ5P°°$´•´•h@4 È­҄펻4£vnª LJ×+‘“ZHÉO* ˆD¨mžT¡æv„*í+í+ TV[­Œ¢E¢Ñ€h ÇØæ¸+;ú‘×ˈY4  žÒ‚í„ISè/»6'=o‹¹ÛZ],b-<±Ê9•s* ÔU Ô(l±(ª¨¤ƒªøSû ¤*Y¨@<` 3‰D¢Ñ@5Pa‹¢$½¨«£D9nñpD¢º¤ú[+£ªwkåNö§S̤[ôÒBr ÈLJI+£.2iÅ¡*Ž”ê’ÐäXåÂ( Ôg ¤ Û¿6.òæm1w›5g[»ad»jOvÙ_—;†U¢Ï®ÀfÞb»‚z½WÂN2˜ ˆê½j¶˜¯m^Ü^ÍÛ¦LŸI›ÕÌIª kVòÁßFaw=+åbeÝidUž€Z¼Žœ]Œ >îä·‹w# ÔG ä¶zUr–g[а•ùßš4‰‡ í, ÔA Ôl×mب¼ZÓ‚=Ûz³^ãVõy¼ÏïeÚõní0°½€*®Þ,>/¾^Üñ˨·>Žzå7‹îE…­Ô`;y*ýµI 5g«míºõ´MÃFÔàÛ4¤Ú‡­;«B»®¡Þ-þÖóª¡`³°»~_T½Y»^mvY`+•¾¨Èù•ó+ÈÖ@öYëv¤­Fçlë`ØA+-Ñ€h@4PH¶zE2¼[¿g»~ƒYmaˆG.rE¢Ñ@2 Ô(l›¶jK0¶ÓfÔÜjdñleQ…h@4  Ô’Ò„íNM[ÒNÏwð˜†î¤|„-ò!ë}¸’׸òá-ö2(=¦ ,*ßžÒfÒf¢‚Ð@Âqf˜†î¤)yæÙª¤™\ÈÈ¥“ZÄü¼ª@¥¹!ÅK­>ïÂ6+ÛVŽ/*˜qîì6ˆ{Þ|}²L`ùwžµ1XŒÏd–}üIß_WÛ§VûEŽõ/¿-ÿú£}NÒ‚-¸ ‡u°Leï6˳ÍØ–U„ÀÅÆw‘ª¤wV€‰:þ¼èpˆ pA‡šh û÷f§ÛôwÀ¸ç“´_m·’ïÏÒq%/ôÕ}T;&9þ$çA^“ÿp‘sä?G5 [µ7qf×Ãȱg³êNuàeFÙ~8†Õ›Õ'6ôùª<¦çš©ð£S*f<çsÙë-3ëÙf¼`|¿L¼cÒ»¢¶¼Øz¹ ŽÇøûõ{+øøt%"ïXìÁGÕÓI:Þ£e•ÅÇžÿˆös~;ŸR³ýýç'îùHÏ.ªý+ѾæùQõ”Ít¡ü|mÈmç ž­Ãªx¶º…Á6‰çëÓŸŠˆT¨ÐJÒÈQPÿ6ÛÏŽ,évôµCD½éê¾?ðúcþþ˜OlÿÇûÃôŸTRo»VÃÑiÃvWL˺ÞíÚu2¹‘áæ"y2LC׆­¬”î¤|_wȸz³qÏ;äpÏVÃÖ [ž­s!1Þ¯Äk˜÷—äó{Ê¥Tjt:|žy±‰«—wü‘¿ßíh™¼ÎÎqg{ °µÏõûõùÃo6KVj„qþ0œ¢ÚϹ%XÇ÷|l"Û?AûúÏOh2ŽìC|ÉÆêz¦q©w»ÐGœ~ãt×~q…BâêM§ñþ¸ßõcû”þé/Û9zÛ5!¨1Ø®aØþÕå‘ñÂ…îD®^UÏ6,tWo6îyrI`ëuÊØÚ#q³ƒÆuVçâï÷ülØú>? -B?î÷›µ{ÝP¯ç'šgœÎšñpü×ôªªÛP¯,¦ýìÁ‹ýû➯6l"Bé¾ï7Û_¸p<ÏŸÛ7A I6–qP‹{øó™ˆPX-ç$Çx|±íWýzÓ‘ý7Q½êøßÛÈþ¥ÿ¸þèøkÉû“@˜Úl7„7ÝyQs!ZæwV¶:|\yØÆŸÿ¨öÓ[{üçÇ¿ËlsçšaðàïϪGì[¤â‚:iûÚúE=å¬Á[ø4CôùOxü¡ uâÞ÷¼½8ÏZß`†ÎÃÆ­šŽk?ßó™ÅŒ&`#ëMWóýñµ¬Ã/æIú¨þÍE™1ý»:Ç7’ç£a]s°]»Îóh5p òlã:\ŽŸ÷ ÚX¬’åçø8êªpØÆ‡ìêêïK÷¸¥ª×žÒ~Õk¿Üzrrl¹Ùúƒ4Ç(æÓ©Ý¹[¿g[G`&ÿ–†²Zÿ‹ëö…ÂÞâ#ç³rçSÚ¯rí%úªýöJͳMÛviF¦å£g+¢¬}QÊ9s  šÒ†mSTÑs½Û,Ï ýóÎM=à>21`”„gÅk Ô/ „nK I#ú¨_ú(ó]£°hÿ´S\Øø‰“¥êO©ÐF¡ò{ij ˆÒÔ@š°mÖº5ãy[x·°,ÏV`+âMS¼òY¢'Ñ€h ®h Fa»ÃÎFÞ‰ÃÈ| “0²t”ºÒQä8E«¢Ñ@u4P³° š³mV$óJ ˆD¢‚Ö@ Á¶­ÁÖŸ]ŠØ£mî<Û&ÍZ8 lU´ÈJ&`1×™PrýùÕÉ{et- ˆêžÒ‚í”é3©yq{jÞº=ÏÛbî¶­Û°QçFlQˆ '¶héÙ#“¦R“æ­|£™*'°O8*¬|f£ºwR¥#Ê9 ˆDù¥taÛÁ…-—¡ëÁv›†ÛÒ®·÷—Æ%´cãbuû—&%4aÒtjZÔ:1lÍìNa9N«SO3êóõsåœb/“®1¬¬¿öme’ñ‡Õ;õ¾ß­ÐS×êuJÇϯŽ/çC·h f5l‹hÊôYTTÜ‘½ÛŽ Z@·#­Wží¶Ô`›†ÛÑÚu¹Žm{.¯×NÝÂ&NžÉ°-I [-ްDèöãU­§õùQ… |%ܡlj»ÐëuÆý~y¾f;¿´·´·h æ4*lK:S[óbÜvaØn¢mmǰm´½‚í.Í;ÓÎÍ`”Mœ2‹šµh›lU _½ØìŠ.IÂÈQ° ­7Po3®zŠOèõ ^§tìšëØÒÖÒÖ¢üÒ@:°mÁžíljѦ+[7jÙ¶;µlד6lÜL ™³ 6úoºÝÄo{Ò®E=غÓ.EݶԼe»HØf•ã¹ÙÄ«b=Í´`‹E_‰CÈA°-°zÒùó«óËùó!¨9 ¤[^P<•aÛ²m†l/jÕ¾µîПa»•nû'†í¶fØnæJ}¨qËÞ Ý^ ¼“¦ÌaضO ¶IêIV§h\½Yûûýaáø“Zèõ:ŽW¯ºÏÇ·³\`¤D¢šÖ@:°mISgT0d{+ÈwÜJ:¦›> FÛíØîÀ°ÝBM[÷§&­úrá[¸“¦¶B¶þdêz:¡Û\×ÓŒþü$õ&ízºÉ=[€¨€ëuf„]]˜Æ½_."5}‘ï͉â5l›4lç0hûQq§AÔ¦ójÛuO†í‡´ív¡Û [©YÉnp[²‡ëÁÖïÙÒIKîŽ*èm/¤ö‘ßßA¥¤D…¡´`;mæ\öh* mßm?Ú¸ù#Úvû¿¶;*Ø6/DM‹[x¶#gÏÙÖua¥U÷VêuF«ëz–ãŠÒÑ@:°mEÓfÎs½Ú¡Ô®ë>Ô¡Ç´ióÇ ÛLØÂ³í§æm±Hjâ”ÙY«‘åĦsb¥¥E¢Ñ@þh -ØNgضé¼;µí²GlFÞBÍŠ¨ER»²W»Kó®¼Ï[Út>L{þˆ]Î…œ Ñ€h ¶4láÙ–tİÊa佨C÷ý9Œ Ï–ÃÈÎjd,êË^m/öj»©=·AI-j«!ä{¥ŠD¢Ñ@®4l[ry.•t ¼Û6JÞ‹aë.röÙnæyÚÊ£UI-8ƒÔ„É3²Ò5æê‡ÊçJ' ˆD¢ÚÒ@°mÌ«‘§ÍÄjä¾j‘”³"ÙØú“É å@v§¦í8mcÎ<-«Am5„|¯tBÑ€h@4 È•Ò-'µPûl{QkNhQÌ[€J: ¤ ›¶R#Ž ¹‘Èþ…‹ì¸k+RU˜Ô¹úqò¹ÒqD¢Ñ€h 4 l›¶È å¤jlÅ©Þ ·8é3UZ;åõviA;pm[_=Û„%òò¡Ñjãìª?µq òrÑ ˆDUÓ@:°-ât³8/r׺*ð®ß¸‰aË…þ¸SÏvÇ]²nñø?ïÜŒÆOœL›Õ)Ï6I!ƒ\‰1(OtÚßU›¿/íß"ŸWµ‹‚´›´›h } ¤[”ØkQÒ‰«ýÀ:óߪ?(±çÀv{³ÍèÏ;7ul§¦°µ“9åΪ'«ÓrzD§êWûAý7{ÍN²‰ *+3ëÑòýîN£ÆÖ‹µS)ºÕ…|U€ì×T"ûSÜñéc,/+£ ¯²‘UÕÈûÍNªK³Bl=Þ$¿ÏkGJÓýþí/7ýÎ+m*m*¨;H¶Í]϶“\®i[T¢ëÙ6l:°eoÕ¦<Û¦ÏV¶¼Ôót@d€˜•Øß¬êã^ðüt˜Þ 4Pê=™ûq… ì(ÏϨã¯4p£Ïl¬t¥¥ê7ë諯ë (ü¿×®JýûüWí¬_‚ö— 
Cݹ0ȹ’s%HWéÂÖlóâ Ünñx¶®gû§šÌ[N0&\Ì k!”–»06ËÑîkmØúþ “^"N…¹-`ÛÇçóÌMÏÖ.>o†u]:lÃ_uÛ_:oºWÚSÚS4Pw4 Ø•p™­’ž­Óh¾ª9h•¨Â¤æãuº …Jsá`QÎaçÌ+ î¾ÏÏ,¦ò×·ÉZ püÙaä(؆_’zºæk0·«+äý:4µM‡¿«Ùþra¨;9Wr®Déj V`«W$Û ¤r}rƒÂ´¹þÎÊ|~¾_e~‹¼6ÝŽ*í)í)¨Û¨7°M«Îl®ŸïÇ—«ß-Ÿ[·/ rþäü‰’i ÞÀV‘LÒNÒN¢Ñ€h } l%MdÊà%ô/Ò¦Ò¦¢Ük Fa»ã®Í}Y¤jzÎV•{AIK‹D¢l lųÏV4  ˆr¬mŽXFx2Ê ˆD¢­ÀVF´¢Ñ€h@4c Ô lQý†z¶f!‚º>úó*÷¸•„Òü=RÏVFÆiêI>Kô$¨Y lk»k.SH=Ûšír1’ö ˆÒÒ@ÞÀ6¶ÞjPºF3pL=V/i„Q¹Æ«k|Ž?]¤?£~ÎLi§cÔ°õ§NÌNëhŸ@©g+:­N-Ÿ#Z äŸò¶‡¨ðz«Iêņz¶qUkܼÆYyíJ>:7²~œï›Åü°‡¬Ù)’ÔÛ•z¶ù׉äÂ&çD4 ˆÓ@ÍÁvÝzúKã"Ò{mƒæl#«Ò$¬]üÜ_¦ºŽU5(ª^«ž<·kÈZ“êžg\…r{RÏV:l\‡•çE#¢º©ºÛ„õb£`ëšw«þ.5 Ò'ð|•ÈÀ¶¼œ‹×zÒÎ!õlëf'Jz~åur~EõWu¶ÉêÅú€eÕ›(B*4QrËÏéY¯5±gëÖƒµ¿Û󀥞­\pêïGνœûúª¼mÒz«v½Û,2ªÞ¬ Eëâ耞Q(=ª^®¥†$xç:»Úü°ÆãÎgz‹¯øµþôR϶¾v6ùÝÑ@ýÕ@­Àó¶°G&Ö>Ûêv¤\nªî±ÉûëïEBνœ{Ñ@õ5 °ÍqÖ¤"•z¶ÕsÒ¶–×I[‹D5­mžÀ¶¦O¼|Ÿ\lD¢Ñ@Íi@`+°•œ¨¢Ñ€h@4c lsÜÀ2r¬¹‘£´µ´µh@4¯Ø leD+ ˆD9Ö€À6Ç œ¯£,9.ñD¢Ñ@Íi@`+°•­h@4  äX5Ûµœù¯MZxù‘eŸmͨdô*m- ˆjWÛJŽfj»^®t˜Úí0ÒþÒþ¢Ñ@U4w°5Ó®à‚vV¥Àz³º"r«TŠ\ÝÇKÛhTúA*Å2 Ðé3é½zºn®d}^ªÅ˜z¹ªñí×T¡òOUN¢¼G:¿h@4 Èo älàŒ2w3@hCëͺ S¹Š"fÙ¾$õbÍ\ÇA~¢Køùs-«ãàÊ\P%£'rÑÌœ9?UÑ@­Àuma|¹‘Ú²A§¸xxÞ(计mÃÖS·`½~,²ž®{L‘Åéï¹rä«rå=ÒùE¢Ñ@~k žÁÖªò“&lÖÛ•‘ßBΜÑ€h ¨QØîÔ´¥·"9Û³ ªW딣Ӟgd½Ù„ž­Ö5=Ux¶ö­¿<^ÿ²U³apûõ¹8iò™r1 ˆDuKy[ˆÇ®Wë û,RRÏ›cÎ×]ð™ ŸëòrÌgêÑúàèûüÌbª¬ybïýÙ!âØz»2's¸¢Ñ€h Þi ï`›ËÑšÔ‹­[#Á\jA>[´  Ô¤RƒíŒYÔ²m'jѦ#•tP¶~ÃFÚ¦a#jðÇm’ZÄ…‘sùÃ¥^¬t¬\êK>[ô% Di V`‹LR° “§Rã¦Eõ.œ R:¥h@4 ¨_ØÊ܉ vD¢Ñ€h ǨQØîܬ•J϶~êd/ç[4 ¨ÏÈl1o Ëš³ØJg«ÏM~»è_4P5P+°ÅB)ØD™³•ÐMŽC7rq«¿79÷rîóI[¹Ø ðE¢Ñ€h ǨQØîÒ¼5éPrÚž­]µ§ú#'{Ue èc@ÒŒ|Ë$•~ûȨ¹ú“6”6 Ô  lq² T{¤S!äó}i«} éuÆôÛ'ûؤÞoz竾\„äwŠfêƒò¶þ䜫Ø*QUïVÁ¤Œ«ÿ„¥SŒ«7k=_^šml½[ª¡°ù~ýÛÍ4“ª6o`;ï ¯×ë F¢ÚÇ«줳4½sï÷—›õ€cKRï7+§ûþõˆëCg”ß(Ð ®ò ¶6¤X ØÆÕ»µëßÚŸg§kô×›EØØ¬ ”FNR‚OC-(Œýý®È4ðŒrvYÀ°™¤^¯Y8kPÀƒ ìø<ó»öÍ6hP]ï·zõˆåBT¸"9·rn ]µ[ÌÛÂ&N™ferè™Æ‡t#aâyyð`ÈeÒ¬$¦zûºÀÏJRï6ä3“ц£Z,„zp=ëØ÷+Ï5ÄO¡q’6×ÈE[4 ÈG äl-‘ÀËó<ÛjÂ6®Þl®a÷ý:T\-ØV§^¯íÙgÏ lå"–19&Ñe]Ð@ÍÁvýÚµ¨˜ôŠä Ï6«*`‹’y.ˆ²ëÅúëÝÆÁ ºÞ,>Ëï•Ùaì$õn£ÂȉêÝÆÂV{ÿ!åý¬9n»^¯/$m~—åÅë²…v9ôýî9òC«Þouë×…%Ç(~Ñ€h HyÛLY§æl|½X}ñ7·Ý¨ÇŒÅ>fH7²Þ¬µ@h/òmÿ‰¬wk‡Àõoñÿ†ðï~ö|mlÃëõ&ió5˜ÛÕµq IÞ¯ækCëüU³±\Ää"& ÔU älëj#æËqK½^¹å‹å8D‹¢¿¶ ¶ÕÔÑH½^¹¸ÕÊ1ŠNë«j¶˜·…e¯F!ÖW!Êïí‹D…¬mx¶…,Rùmr ˆêºj ¶ëx5rã%ÞŠdñl¥óÔõÎ#Ç/ ˆ’j@`+ž­Tû ˆD¢k V`‹ý¶°IS¦[¤d””t”$¯­ˆD¢º£mŽG3ÒêNgs%çJ4 È•j¶MZ¶ñæmÓölÓ¯×ZXõls% ù\¹8‰D¢x  lq²Ó¯×Ÿ9HdùZÏV:D|‡6’6 ˆr¡¼ƒ­Ô³uÊÐeÒ#&«g› qÈgÊEG4  ¤£¼‚­Ô³5r W¡ž­tŠt:…´£´£h@4¶j¶Øo ›4Õ^,õl‘È?¨ð|Ú'^>O.&¢Ñ€h æ4g°µ~¸Ô³•½o²Z\4  €j¶M[µ%½"9ȳ•z¶NY<ÛšmÊÈ^ÚZ4 ¨ äl¥ž­SÇ7cÙõl¥cÔDÇï‰Dij ¯`›æ“Ï’Ž" ˆDù¢mÌ䋘ä8äÂ& ˆ‚5P+°Å¼-lrÖjdªU4  ˆ O[ñle¥£h@4  äX5 Ûf­Û‘^‘,žmáÜd4.çT4  äAY`+Q:¢h@4 ¨Ï6Ç¡ƒú(*ùÍr1 ˆD~ l¶2W# ˆD9Ö@­Àó¶°ÉÓfPã¦Es’ŠETÖ]Fµ2ª ˆD¢Œ ¶eµ[’.+ådŽGK"f¹ ‰D¢ü×@ÍÁvÃFj^Üžô")Û³Õõ[ËËËt…8»—Q…™ÎP—¡ÐìçÜ×ét‡^ÜòRÏ“.-wÓ"Ÿã«§»Âÿýú¹¨z³¶™z´øŽÚHGÌÿŽ(çHΑh °57°…Ð@eÀd×·µ½Fõz¸ü¡ž-`l€V »´<öL}E|U‡\!à1€<¤Þ¬¶Y¹€öDίœ_Ñ@2 äl}‰÷Í 8!ž«í5F…‘õsÊCU°äú¹š `¬DS•ÇóŒ­A€2™ ¥¤D¢BÔ@Z°6cµj×™Z¶íD-ÚtT¶ž#ÇÛ4lD þ¸MCZFÖžm(lM0F̃FÁÖñbÀª¿K o7EØ:¡pñj ±ÃÈoˆDUÑ@­Àó¶°)Öjd„…ÃaÛ€i…y ë 5«¯=¾ªP1<Ô ÿ÷e/n2<_ý]±ž­–¶¿[J̪óªt4y\ Eõ[y[sA‘®žåùQsÕ¿€)`ñ‘ñ¾,ïR…¢õ-ƒÔÞ¦ªÎÀ¯·ëí®ð Ì­?Þâ+ëø¥ÃÕï'ç_οh ~j o`+¬Ÿ”ó.ç]4 ¨H¶m9[më7n ž³ #ׇ—ß(Ñ€h@4Pÿ4*lÝR[™Ÿ•ùYÑ€h@4 04*lų­£5¡Ë9 ˆDñH¶âÙÆ7¸ˆRÚH4  Ô? 
[binary PNG image data omitted]
nova-13.1.4/doc/source/images/rpc/arch.png  [binary PNG image data omitted]
nova-13.1.4/doc/source/images/rpc/flow2.png  [binary PNG image data omitted]
nova-13.1.4/doc/source/images/rpc/arch.svg  [SVG diagram; labels: API Server, Cloud Controller, Auth Manager, Object Store, Compute, Volume Storage, Node Controller, Network Controller, Storage Controller, Nova-Manage, Euca2ools; connectors labelled http, AMQP, REST, local method; notes "Project / User / Role / Network / VPN" and "VM instance / Security group / Volume / Snapshot / VM image / IP address / SSH key / Availability zone"]
nova-13.1.4/doc/source/images/rpc/rabt.svg  [SVG diagram; labels: RabbitMQ Node (single virtual host context), topic exchange "control_exchange", direct exchange "msg_id", queues keyed "topic", "topic.host" and "msg_id", Topic Publisher, Topic Consumer, Direct Publisher, Direct Consumer, Invoker (e.g. api), Worker (e.g. compute)]
nova-13.1.4/doc/source/images/rpc/flow2.svg  [SVG diagram; labels: rpc.cast(topic), topic exchange "control_exchange", queues keyed "topic" and "topic.host", Topic Publisher, Topic Consumer, Invoker (e.g. api), Worker (e.g. compute), RabbitMQ Node]
nova-13.1.4/doc/source/images/rpc/flow1.svg  [SVG diagram; labels: rpc.call(topic.host), topic exchange "control_exchange", direct exchange "msg_id", queues keyed "topic", "topic.host" and "msg_id", Topic Publisher, Topic Consumer, Direct Publisher, Direct Consumer, Invoker (e.g. api), Worker (e.g. compute), RabbitMQ Node]
nova-13.1.4/doc/source/images/PowerStates1.png  [binary PNG image data omitted]
>þD ‡á¬ýàCÖø'm ?ÆÚýÊ {k°âK(3>àN^ßôGáÒzUþè--ê ᢭u£ö‹“ä˜2eLÑ:ø¿ÛËÿغ-œµu±•\?ŠÈ%¦”rݤéˆm¶Íæs”+æ‹×aþØ…'$HhÛ:,àÄ”™²—ëZ;,†‚¸ªãÃL‰€>°ðäÅ=<öàðqs+Ù¼y'-XiE‰Qúe<šOÅgoÖÑŽ+±?½G­»áªM½m¹÷ÿÔ쾚 íø'†1ó“û»µ§aê©%¦C +»Ì´ Œýé¯ìzglÑE\Šú+Ž/éG—àzÿ£Ùÿ]…ÿ>mbü¡nmò-vKXU1VSì[ÇÄøBþx÷\ˆ9¶Ëmä´˜˜h77wúksê\¿ž†jº?–]®JrŒS Ò1oÙòÍ+ƒY‘¬^æe¯ÏV5oXŒ÷#_2¹&Máq…5€ä±ß<ýAÌ»q#m°c·>ÔÃŽ‰»bóKÅäÛ Ö¾Â~Ûã³+Vs.2çPÖX´öÜ¢µ‚&Q¹àõ¼4Ê%ÙÑ.î¿yó< ˜Ëª'qKÏ,éç6ç"S÷>ý bø&DÆ+£X;àUo\¨Ñú‰ÜpC£ÇnâðòcLIêÏÜÊ’‡a³BìvÌÚ@ìªûä'0aÍ*±co[~âèþc&±JQ¨‡uvçd&ó(wY ¦²³¯NU`bã*)ä"¬Ýw%rþpËò±g4’䱋% k'™æ2Æçr%%¥’’±gÏû¤t©ê©ÕôÒà¯Â<ívÕ¹Z+\§¯DoÄ`˜¶¶6[–¨ûäÕ2u™¼ŠoýúõÂnEÒ€ÊOÒh{]ŸtÈ“Uþ$JÞ‘#GÆŒX{– µí|m¿èêê~üø±îäIÒf@Ãå¯.·xñâððpºÏ”)S¤¹|Cì?¸»»GGG×$†™3gîØ±:8È"Ù¨ëâ2ÌŸPäq+ ‚$ãÆ;tèÐYYÖÅe»ÿnccsóæMABîß¿Ÿ›¿ª y¨ü’¡  ê#ÊøYÇŽ%ù²eËV®\‰ø“ ŒŒŒ$ym‘×€øc0†„-OØy@ågÝB‹- Šø«sPUU¼‡Ú/uÊ,Ëç‚ÀÒÒñW#<~üØÄÄD¼qÊËË ²sçΈ¿!))Iìü1™L±×”u‘?jy ÇœŸå ¹Áöšä|È;›œ­|.R“ºœ‚-"·ýå0Ò³[ÿÑ>~+Ê|fÛÓ‚Oà°ðu<æ’îj@IQ‘g°i¯ª®ùÀ±0Îë§þ7×kZUþµ{/—Qv£¦ÁXÞ»n;T{à Oߪ6'øN‰?;yìHþS²Š1¬t\:åç;_¢ús€¬ à,‹¡Ä|b·Ol8{H’7hýCß5¿o¥e/¾ü„äQ>Í :Ã|4ÓåÁÊ®‡»÷F‘‘V@À0Huj;tô1Öl"'Wi1ÃÏ_øãìï<Ê×hUUê1ÃfQs¶XÍAŸä«qSÆ»U˜$¢ß8/èsñÀ_‹-?,IK­Î²=JX³­89• sÎÏŒT|åbðøšóÉ«*›82ñÁË·³H cF/2v_ÿdr_ÃÅ7–:·â(f\0lMV ën‰cÝ^¾ù°%WO®s2 9«¡'l"Qü‘È+_©ÅÌx©¡ÛRQI)=ƒid¤–œ¯l(×L"ú#nº,’Çâ’èü®xƒ¯`7Xâžï$®×¹¨°H©™ÎO¢yFììk®7/!ksŒ—õä¨Û{\9XÊ)(®Š<3üü(Ï¡Ãú’¹£ÝsGøwmæówÊ‘é@‘™‘!;NröbaAõ~|=XE¾´9°Ç¹•ÅüË»l¸n¹TÈ“WMÝÛ\O ž4oš’ZBÆÃqA|Æ·•½5+’è¸$%~ÉŸ¸à.ŠJ’á¤bê])óÌûRKü/ç†÷Ób?Áw±Y|µ17uŒJE#›`’GcK&WûÿALbëݽYÿ©˜PYŸB.5Å£(ŽÃqz}9/òÒ¼HV¥ ¯º¶Ý×|žl¦[ÉŽÉCÅ4R!yðx:Âê8Qü©™r6RL×Ò›0îë¤_ÿ5WSͤ­›7},¾þù§¢Œý„E¨¢DïážÝøäXAÞ‹¤-{WîVz-¦yN”Œþ¤ŲÌoU®.çhþß¿}€{‹ˆÍïÄ\1gê×núªÔU|–JÅŸ( :XÑ'¼™€©CxMvçs£ÊÓ—¾|?üïŒÙ¸Q2ÏTÕþýÛGñøjcªÍ9n°xá?ÿµp\m^ÃkS³K¹ƒÛ·%Ð1uª§£½!ÂyÂLî°§_©U±ÒUGQÀÀK‹'ÕŒ¼:4þéãMÔ97^~TVa×TÏo];‚·)¥)ãGqôÛâþ‰XÄ™Aü;t"®³µÛëw™CÕŒF‰Å ‰ ‰îeæÉɳ§É¼ypsø€>|n˜ˆ9æxgÛ|”Jÿ)$4lÐäù•‹¡œ¯[ã×½ÛWŒu™¨Æ=¿Ì¸È«hò 'š [¶lñööæŸe= ˆf¬……Err²_òØ1»üÑ‚¤1~]_áà è ­6@ü víÚ RQQ‚Í×Ç;ñ'4””” )øL'Ý»w¿_ÐM455‘þDÄŽ;fΜYm°øøx¡¢œ<ˆ~ýú!þDlŸ²²ò“'OŒQýW·Y0¤¬Úþ¥¥¥ü'2}ÿþ\)Ñ·oßË—/‹1éž={Þ½{ñW#@òœœœ®^½ºhÑ¢õë×s É2d$OCC#//O,é:::J‚<Ð÷äÖþðøîÝ»V­Zq‡9}ú4iÍnXågAAšç—\HåÖÖÖ5jÔöíÛcbb8ºóŠŠŠô­+ccc]]]I÷ºuëüÊçä¡ö§D@'ïÎ;VVV¤û?þøï¿ÿ #;;›ôY¸pᆠè×rì;š““C:RRRüh“9PÿA ª·   ’<:èäñ\eïåÅÞãšœêïææFIñ'qPm“ ª?7™?K‡¾~G‡°7ã'§€²&òºÝ2«?Øáª5kÖÔÍÇá]~úŒéï1d,5ùšƒE{ àúmB>µ¡¡azzº€¥¶ã=oûcàý1D“&M$µ?Å û˜L&ÎáÃm²%88˜Ãndff·MêB’3d99}ÏË(ÏvÂ, S‘uNTõ6€ÿÝ%ìm{ü}®)1™n±9X›LüZ€2{ÛX˜´âc¯NÁÀ&.»>ÓIÒ }}°JÕl)•¨LŽVðwöä~ÖåËÞÁr‡×ÑìYS©Ö¦ á¿'%²ñ’&ßɰ°2\3oÍpd2ã×ÝÎñ³Ö" kò¿ðüùѯ†E÷7>hµài¤q↧si+ûùƒŠ ;³¤ŸTõW¾T̃þ·ý1«•|Œ•d£9£Ûªm¹ýÛ$†•c`äɽ{·zz†5Ù¶«iD—Á°ZŲ}LzBòBb9w“äÁئ»uÝ ëÀà5—Hÿ¨ ó¼nFí1€nÿX% ò@eó| C²Ì$c;༕þ“DÉÈþQýi¿ÔWp´Ýi0/_¾\¨f¶I þDG‡ÒÒÒ„½J__ÿýû÷ÕkÙ²%ÒŸ!š2¿ Í©‹|Så¯_¿ ¾W,âOüúõ«qãÆB]ÉkÕªÕ»wïø“З}Ä_%PäµoßþåË—Õ†'Íìð'Éd2 HÞ—/_š7oŽø“(ò¼¼¼¢¢¢x†ÉÍÍ­Ę̂XVÏž&*ý)0 ½þëß¿ÿ… ¸É>|¸‡‡‡««+}ÅìÛ·o¡gLLŒ¶¶6éC’Ê÷|Fó—¤ HÏzëĉ”r ¦¦¦©©©×®]#ý!UPmTx±oøŒøü Àox’‡ï$¹„nÓJÒ'MòçÓ~©»ý¿ºcLÜð"ù`±E§n5¤&…!þk*Ö¸.ñ‡µ Þþ[oÚ)ÜØ¸ñÓ‚:U³[%å܈‹ûéâÒ¸Žéïí¿”óÄÑý#ÆL¬ý >YÌWs]ºÒAKÖ¹\°`éϲÒ&r{…áAá[:..äÿ‚öÿ,»cÉìf&LÀ=fÌØd¯¹XÖ96‘“¾ÓÆÞªW¬ÿ(n’îW"iÿΠ 3«hüñ‡øCþÐî¨øD@@òC@@@òC@@òC@@@òk((ÀKJ$8­(**˜{Ÿ)$?$?ˆï×0EGÖ÷‰7—ɋǸô„ÞWNîßwìòþýû¡ûìÙ³ƒ ¢;"VøüÝzíÊÙÐýäúáý—^«ÿ”è€ãñbî-Å%5U\I“tê<·8§ïÁ|g›óÖ®°Ì¬/ ½æ§ó½-ÕÈ0ð8rSÚ–±ºFÐí›áZii7± í°r+n³O¥™™ã:-°ZaPrÇK¯öÃ0exŒ>vl ŽçßXÝw8±Ùlί.gÿàââB~?$Ô4ìðUsBì@P"1Æÿt*íDÁÏÇÓñrzp‰-ÖÔ(ºv!7m82}®±”®»ý•ÔØDv€‰G¹²H)$Vä°¼;ù(se°ñ¹ÿÅ7ÓçlûZç‚ÿöz%#Òs±9Oa¹z…&øaX¹ ›Iúy¨Š‚tl¹“:¢cÈélOã—Éï:ï-—+¯+X[’Á0°óå“¿bâJ´¦›ñüÅí”KãÇÍ9´Ëmk#¬8æßX77÷œw/´ZudËx‘ýæA'§1YwÏ*wì×JMüÌ;xôÌøñîUSÔÒª U}í`/½6c»š9˜²}¨Æ…¼Ÿµ–+ÀÖÑ+–µyw<$+€¥^ +5P• (klím"üÕÄ;NvVkr–Øk‘yU‘uŽa -ÐÅÿÜßóPžë{bÞƒ«,wÆÅ¥¶ž«W iíµó y˜¼ÑÔb±7LnÛX튛Fé€|=‚5Áíâ>J”'Ï›”}ù)8ðXšÒñ >õ uÆ1ufm2¾–vZ‹k®Æ&ÉðÌúRfÜ‹Éô® æeÅ~ðZåL‡øØ·cø°IXæl:‘ µnm´asØ­[¯ž_?6oÞ¤õ·µ;4‘Àø‚¦Ž¶¶(ÍÇŒû §1­q°u«ÇÍ›RÍ%†i03µÊAR{DûeŒµ——§µ`3¿0 
XKZÓɧ¸óˆÊ.U}pà.»jṚ¬{½\DÒ”ÝF$“ƒ×~}°Š.B*XVÚêïn­L×u“#¥b/ERòóp±>›wëÅ ë’¿< k.F}RùN:ŽûÛ–»ó:)V®1ÚÏ£N”{mJÚDt]EK—Îz§ÞL&kÍÙ† ¬wËQ— æÍWÕFßÂcM'wn¼ $û ýØZâ„îˆyófÄÄDAÏÓËôøÃD×q=|SC®–$;)m~pðJ£q}´ˆÊ|…5‹+½zÞÒ 6SfSAžë<ë&aEZñtM4ØŽvÜ5 YÇ’U+GÞV ‹9IUo“Ö4xÝ7w-ü£ûTºIYl|Þ s¶¼Ű¿Âï3†éZXd$'kF÷²¼ÍÏ5·ÜöùœJ¸†š,wNÿ{yòŠ6ÖýºßN&N£aÿ³o såìP÷n_|* ¶ŸÛ10“WNûmÁ'û,€¬Ò‰Ðì}øéŸ×AÁ§š–tñ¨ó&Nùµ2ƒ=ˆKåEî5£±‡2Îí ßô¤¡öØ},ïîJn“âÉÃquÖ (¥(J½”´nçáVßbzè­PÖ¯yìSZ Ò×¢vdX­ªýü»lø bç0ŸVsö¡—÷+/phÂAì2}Öi»Joæ>þ×…™"†Ìd0L&³†(B9rd̘1õM~Œp|ûDÕ!ý¬ìÍÖð@¶„–Vtç*w ­.â2+oRŽÇññ ªMÊc&”••KJ¤j¬°ª}tª…š¦¦ô†¬Øïêê+ ©o`¡¦ÆîCèèè|úô ÉOö_i’'LóL ’8~ü¸tò°V¸«ÝÝÅ‘üÄ)ÔµR¬…„„H'9--­œœ4|‚ä'4z÷î-å©éx…ŠŠŠÔž¨¶´çëëä'Û¸ÿ~÷îÝ¥–œ›››Æ{`$5íI--Ô–]7$?fQ:=@éX*ONN¶°°3fÌ‘#G¤OÜÔ©S‘üd ƒ>sæŒt’SW—Æ~Å:u’B*P{ð8}úté³&ÍÎ-’ŸdµwðàÁñãÇK'¹.]º<}úT¢IJ-÷úöí+e¾`áòüùóúô6ôå¶P{“&MÚ·oŸÒ‚ÚKOO722’­,š5kÖ_ýÅáùáÃé4t)899Õ3í!ù µ·fÍ¡Œ¤Š¨½±cÇ>|¸.gHDD„••yÊ­½óçÏ0@j÷ÓªU«wïÞ]½zµþ½{H~lPÚ300ÈÌÌ”\BuM{‘‘‘òòò°Š£||}}ù_"5í•••ÉÉÉÕº-h$?éA¢Ú£#77WSSü6ïÞ½Û³gO>ÂÂÂ*+ÌŸ?¿ÎrµW¿_6$?~ •Pä’ÐÄÂ… ¹ýõôô²X{ªÐµW!Òb$¿ú¨½§OŸvéÒEBñÇÆÆºººŠ7Îääd&“É`0²j¶ŸQ‡ÒÒÒ$áݺuk8ÚCò«P{ªªª_¿~•Däb×DçÎ)·©©ijj*µä¢&‚ö >|Ø Þ.$¿ê!!í‘ûÜ‘ŽÙÛ:Á Ôž å³½½}BB’'Ú´ióæÍIÄ,öIÒúúúb¿arRÒ™Üд‡ä'($¤=ˆ]»v‰7B=ÖÎ â½aww÷:þ­É¡N yóæ&&&?cœ°y,iù1B: …‘ü$ˆfÍš‰W{666’¾í””Tû!È<”””ħ™™™¤o{ôèÑH~UB:ËÕÅÀhñsj`` éÛ¶³³Cò3Œ ->N²Rª9õþý{™¸ÏgÏž‰})ö°Ð«³VˆjW~´äR1¬+}]Ö>¹·0EvÇ !ÓÀòp;ì¦Îö,-…í!ΨH<Õáç’×y'L aݦÈ~c½è>=‡º?øøîóêînC…®¯>xÙ\WŸOü÷¯vjT”*´Î'ÞÑmßµR52jÇ}^:åç#ÄÄΰðu<æVêû¹Næˆ3ëÉÝ}DÎÞ ›·ö]iÑz}cŽ$¾3_X[Ýè½pï‰N+~‹KŠ¾Ú´Ó‘±Úy~aË¡<òïV/¼¼ˆWÓ}zØ]!· ­UIýıÕeC÷¬x±rN‘>‡?°e¶ç>n¯a@ŽkÔÒ¶­tä–¼)&nƒC{<Ñ®çð)+ýÞÃ@£ÚÀ›¶nwt› üµGä^£Æé…~‹Ùã3wvUÁž¾UmÞ‚C{<Ÿ…|³Tp>ª.þöãeÁoèàÐïÊʸ'Œó[qa/£‚g/3§àóO ‡öxBÑ&ñåã{'³öÕ^ê2™Øó›¿öˆ¾q3U27ÎíÝà_×å·qãFP¾vDD„¯ïR¼t®eŸ‰‹—­Ó×,úï‹Ð‰k*É ÇIÏuhÆq€æ­!!_ÂÖ,¨­”aàÒÐЭêå Õ"NÅß1èÔUØ«ää oS‡ t®* @jO(ÀKà…fºŠþÿÝ} aÐjOØÓ ±OO÷ïÉc“›ãW®·5¶6BEex‡ŸžÝëÿ‡}µ¯àxKÒ4^>û£6Õíûïº^gvÓ+ýkjîë)›?lla;`ÇíÜþ{S®-„ݱÿ}5ÓY+9ÒÕbáÕ zJ“<ôצ9÷ìÌç­’WPp6 2yWyv`\y¦½ò‡L]èÔÀ+¦SZ¨Pþê/)¿Äì—ªâ‡øïØ|u›PB Y%é% § ÌìÙÙ€ÍY%Þ»]MH‰›}ÿ«{wUꪘ“g܆ f·S®^oÙÅÒ ‹i³L'ìO“˜v39²/Œöñ¶¡&sNù{y8g¹ ™}øôö±dró/2ø‹ >aÿAƒµZüïïí¬îÛÓô”`†˜›-NyΖqVI|ÌAÇ!ú…Míacî°¤-%cÞqëëNW]è^8H­ÚSùL¤\Œ¥î™ðaåv…ÆÙ¬)™Würf!,ì`†÷:A–j?X˜%|.³Ón²Í­5GÙWsV©Xœ“K:¹{}úðs¹ÿtrýè¿#ëŽüþaKµa TÕäTÍNG˜Ý=´òâí.Eg7oÙââíÌ^ÖØíM{> ˜¼ðêâ‰#V¦iÉ-]WxvgçzÆ[·Þ^ð½äìý“ûfº<>‹ÅE­#_Ð[Ö¥|Ì^mݹ,wG‚áLS0Ï{ib̆·{–Z€åª„s!æ=\Áûóðw°'jwÌ/’nt0·†·QõŒ‰‡b¤ÿ³;«D oéôñ¤ƒð¹v|M#s£‡w“£çØ ^òò޵Ȍü(½Í‰y;‡æCwâøNš§¹ÏEœÕ+ŽÛOx›‡EqDRG𣤤)k¾Uõ· ­×Saf¬d¿ÄáسW…W¼Ö÷Ï3óœÈ­ßBŸª:'Cõ”N±ÄC–ý|împŸ í=¹~Áض?[9P],Lôö8€Ú#ni3{÷DòWÕÚøoùJ½²Òï ‘›·þ!|W^hÆ÷  p öŒ‰M¡¢mô»¬+C•®=vŸõ"ŠðÎñ/#Â7D8O˜)ʳW§=*dHXØ Ïù¢$À?À¹;©z­Û —½?¿wÕרEí!ùqÂwñÅ9ÿkÑ›’ÆÕ~rcÄ@A§wMt%†:ö:Òí¡ÕŽÛ³i ñ…F‘ÿ;Gâ.u´¬þ›Ê£Ö­ø—‹ãYŸ²?áͪðãóäŽB[‰ bíòôìåë*ºÕN¿Ÿ0rpA{ïVÄpî†È-}ÆL¸.U¬õ÷ ÉÔU›™©Vòyþêõ—ü‚¶­ZêéhW` ÐS+'ã4‰ü$íÕÏŸ?M:¶oܸBðf´¯£ü1Æ¥ýôÕ›L¿E —üÙӼǡ€Âä˜|y÷Á£ÉÆ?×Áˆö1CWt mÛ·åð¹“ô0õqª££}»Ös»ÍÖ……ó½é§ß¾¿“üÇñn&]š«©Ò²·®ìõ†ä':µk+¡˜;´clíÚø/ZÈ¥½¡§Y×?¾WÒžØ;Þ–ÝàŸØ£UTPp´±ªËï’_}ƒ$– +¶H~¡uëÖ***………bŒ³¸¸X¢÷¼k×®iÓ¦I"æfÍš!ù!H¶¶¶°?)Æu·Ë–-ûøñ£®®®„nXBÚƒ¨ËÚCò«Ÿ8wîŸýg››7oJîngÏž½}ûvIÄìåå…ä‡P €Úspp¸víZ ã¡´÷ðáÃnÝÄ<@²{÷n i4K†ú~µR{Í›7ÿòå‹h1899‘†õ455sssÅxoä,| j‡U™0K†äWáëëAºÇGiïùóç§ŒZÒµëÕšŒ…–””())a’ÙÜ€¼7eee™` ɯ¢mÛŠ¯”‡‚Ǹ¸8J{C‡=uêÿH.\¸Ð¿îú ”‡ðððØ¿¿P7vùòå¾}ûJb+DJÕ²õɯbÞ¼y>P{¤#11ÑÖÖ–C{‘‘‘Ÿ>} ¡|JKK9´Xë9é§”ö`œVVVòòòÜw£Ý±cÇòåËÉS±oÁ¦®®žŸŸOº%¤j$?¡‘™™ÉssNŽí4áiLL eàJVzÐAjiÙ²e°òLJJRUUå3ªAÅ +ÕÞ½{þüÙÔÔÔÓÓúèèèPÚ«9þúë/x3t«”ödH~õP{°RªvïZ&;;ûåË—íÛ·'œð8lØ0{{û… ®d¾°°J‹’ë÷ïßaM [¶;wîç¢E‹xjONNàðܲeKáɯ>jïñãÇ&&•v„úiÓ¦ G³c…Ûì‡Íê'N >œ’ásP`éééÜ ×çÏŸCyÓµÝð'CCÆC’_=Ç£G(ùÅÇÇ;::Bí‘§ôOyÂŽaRÚ °A T§V‰G¥ÀÖ&¬E”öüê?ÆO:nÞ¼ µGÿI¼ŸÑ!ž>}Ú¥Kž?‘Úãºú)S¦üóÏ?H~õ¤>(9XéI"~oooŽ>›••Õ;wDˆŠÔ¹‰^M¦ì ù!Ô>^½zennN.A’ök¼ÄÏÏïüùó©©©ðôúõë¢iÙ¥tss322¢÷‘üd 7nïò¿ª°ŽP³Y¦ˆŠŠêÞ½ûéÓ§‡ Ò å÷ý¦èXóm9#챉8ô eÄÇÇGFFRë$Š~ýú]ºtI¼ÍÅû÷ï÷èÑ£¡Ê¯䜣ò:0Ôw¹aÓûî×Îî¼ 
v²mWY„³<]âÑÇ6ë´=‰q™wöÊ-ØG¼¥èÚh-f²ÌÇp "HW}Ë7W“\fªq#¸²×»Í¦zöˆ.špïuåv]Ö!þ&ýÊ—œCþéšq}1u掌ăm½kÔß=dMÜ£0fصkWqq1:‚þµ··700`ffYÈ UUUWQQñí۷Çoß¾}<†“—®`xAo¨¹sçÆihhTTT²²²@–1C\\\XX:’——÷ôéÓÇÏž=d€aq}ía9ý¿FZ‰˜w Õ5þj_ˆÈÝi,Ü99É&¯gmÄgÒ¯%pËìÀ-½n*~:áõZõÁ–‚ýå¶ìô¶'Îz…ÓÌ;¯Îsžh£ÓgŜלê´Ðzl­zÛòÃzŸw’´«{×Ä ³ªkVú!9†C}oÁˆ@U22§¥¥åÈ‘#“hæFWW799”!#"""RRڟ߿߻wï’%K„……A4ãMMMïß¿¿víÚÒ¥KA™±¾¾~çx]]½½}nn.ÈÃäú¾BØ8„Ð%®âMRllLì󗟾µ/fà••’WÓ˜¥­«-+ÐGj^㨶=”~LŒŽº÷ðyræ—âv Â#«®1Ëx¾¡¦߯Û1piß!ìµ±‘_G¬û1M\rl¿Eûbj>nŸŽiÖúÓ«io)ÔøS@¼k1ÍTÇ£M íeQ+BÖж-nl ™Ý=¤Úäù.Çô’*“×x<® åŠÏ•ð'qµŒ|HaÂ+«¿Än…U{?òkí6Þð:qëuA«Md•R±h¼†ý}zz&©Ñ J½´ï„+{}Åïjø½¸¢Z ‚°ú‹¬¬Ì DØÌÛ§ïäàÚwÞÀÌw™£®®ŽÃuµ5­¨¨(..vuu=pàˆC.XYYORRRΛ7Í¤Ž±Ãdl‹M‚-*û‰——333š¹$ßS0~ZŒjjjÖÔÔô˜éèèˆÞk@æ(((Ô××wNÆáÅÃìúˆü›’Æ"tØ8ØpMR·E‡~o™µÑuVßÇ$j»Éµï­Ù Wl2ì}½¦íf;·ä×X¾³ï˜l¬2ÆeúN3¿òŠ=èÐÇb:QË'iƒEõûôü1I­¿Ë©²b:ô²Hv³,Üc…¸¸¸W¯^õ˜ùýû÷3gΘššN:$"} êêêˆç ¹Õ††† 6 6c,)%%)¶¨ì'Ë–-«¬¬¤¡¡QTT\¸p!\·¤Ì¹sçzm‹Åb/^‘5LOï;ýëׯÞÞÞ...¼¼¼  ·ëào³`Á‚^ç755éëë—––ÂWmçÝ»wÑÑÑ¿ÎÿöíÛ•+W–,Y¢®®*ýuòóóZ[µ———ÛÙÙ•••QQÁk”D),,\³fM¯‹š››ïÞ½‹B ŠL©¬¬tuíý?“¥¥åÓ§OA%×c GGÇŠŠŠ¾\;;û¢E‹þûâ÷4³fõY¡¡¡aþüù%%% Ò_GII©Ó6022¢nÊ‹H ‰ß,Eo+ssó¢¢"ŠL™4iR_‹Š‹‹©¨¨®^½ºlÙ2 ÀõÀ¡¾¾ÞÏïw]>&''ûøø¬\¹ä"MÐSSVVÖ×RÔºÓÐÐXYYƒV‡ÒÒÒÎÉüüüøøøÐÐP333‡Ô••%nîÕ+555[¶l9zô(ÈEv˜ššߌ¿RRRâââbmmMII r¸> ˜˜˜ðóóóððêpâñøªªªòòr|?þDmƒ““š7%Žeèùòõõýu>êôÐüÊ„6jkkŸ°/›Œ2¥¥¥>>>¿Y¡°°ÐÑÑqÑ¢EÔã§¥p}0N™:ujdd$¸>2ELL¬¦¦¦±±‘––Ô ¹'¸>²àܹsÜÜÜsæÌAÇ‹‹‹%$$@“±GFFúœ„h.®Æ)sçΠݱcHA¦ðòò¦§§O›6 ¤ 50 Ôð$ ÜÝÝcbb¶Î6***ÀõIÐS¬¤¤:¸>§XZZnÙ²ÇSPP€䈨¨hff&¸>d„ 8t qV¯^=þ|¤-²?///h2V]ŸŽŽèàú`œÂÎÎ.--jnnj#èéKMMHjjj(ë#q^½z…>ý:Ç¢®õUNŸ> :¸>¿˜™™‚ë#S¦L™: ”””PÖGÊ´´´FFFvÎÉÈÈ””eƱ±±ììì""" €ë€ñ‹³³ó¿ÿþÛÜÜLEw:ù1cÆ Ð¡¦¦†h.¤ jù\\\TUU;礧§kkkƒ2ck×®ƒ®Æ5LLL:::^^^[¶l5È99¹ºººÒÒR...Pƒ¤À`0X,t MÖ®]ËÆÆ¶{÷nâ™ï߿߶mˆ3ö¸sç!`àú`\ƒú=GGGp}dЬ¬ìýû÷mll@ ’‚––¶¹¹t AŽ=š––G<‡Ã}ýúuÆŒ Ï#""‚››:(p} :::,,,ׯ__²d ¨AvhjjÆÄÄ€ë#50LSSè@jxzzÞºu+99¹ÇüGMš4‰’’$cxyy999®€VöìÙãêê ®166^¾|9è@jÐÑÑAY©±oß¾»wïþjù¶€Ämü€±A~~~JJ jéA ×@+&&&žžžçÎ[½z5¨A^hhhTVVæä䈉‰¤--mcc#è@:,^¼=)/^¼èu)êúÖ¯_*16nÜèìì :¸>º¸xñ¢ªªª££#ó$;fÏž}ýúuWWW‚t ££ƒh.$z"¤¥¥cZZZÒÒÒÌÌÌ@«±DnnƒƒA ×@’’’vvv yaccãææ®¤ §§×G $&&%''ÿ¦0üæÍ› ÔÔÔ ×XÂÑÑÑÃÃÚj¸>zrôèQ4daaj ,@ó7YYYÒÒÒ é¸>ˆæò×™;w.++kyyùïW 655¹Æ7nÜ(..†Z»®€Þyò䉄„„ŽŽôÿF^XYYy{{Ÿ?¤ ˜™™q8èð· [±bÅóçÏÿø)¤¥¥åÑ£GW¯^ÑÆ X,ÖÉɩװ=€ë ž•¯_¿RPP€ ä‚›››ˆˆÈ¹sç଑ŒŒŒÍå¯ðýûw55µ­[·þøñ£?ë_¸paæÌ™¬¬¬ ݘÁØØxûöí’’’ €ë OæÍ›çîî>eÊ”·o߂䇑‘ÑîÝ»÷ïßjÌÌÌõõõ Ãh’ŸŸ///¿téÒ‚‚‚þouæÌOOOPo̰cÇFFÆ;w‚®€?àààÐÔÔ$''—‘‘j ÞÞÞ¨c‡ $šõüñã;;;H1Ò|úôIGGÇ¢Ÿå{DEEáñx###ÐplàããsïÞ=ød àúè/kÖ¬áááøòå ¸²=_ööö+W®¼|ù2¨A ÐÑÑUWWƒëQ‚‚‚Ö­[çååõýû÷Al¾{÷nwwwqlxäÈ‘?‚®€`ff&##ÃÍ͘˜(%%‚>ÞÞÞüüüÏŸ?Ÿ9s&¨ñ×abb*--)†ââbB]XXØ@Ë÷:¹|ù2ƒ±´´=ǧN:þ|vv6Hàú0ÒÒÒeeeŠŠŠðEœ,¸}û6š.,,„^ªHÁõ¡·è0ŒÔÔÔ¬Zµ*&&ÆßßÿÕ«WCÙUssó†  ÌãØÀÑѱ   33¤p} öþ§¢B_¥>>>üüüP]Ä™6mÚ¦M›ôôôž-ZäââaÉ,+--½~ýz___PÀõ0TV®\ioo¯¡¡¡­­}èÐ!„”Ù±cǧOŸììì ßß…ƒƒ£¸¸t---§N:|ø0š§?{ölTTÔ0îÜÓÓ³®®nïÞ½ 3Y¼qãÆ·oßòðð€®€azPQ%&&æçç ¡ù0Єdñóó³²²rtt„ïß4'ZTT: ˆsçΡ–ŒššµdÚöŸ?þ|NN¨M¾ wÖÔ©S·lÙ·€ë`Døúõ+‹EGäå壢¢ ýiìâ⢫«ûèÑ#PãoÝ,iii ÃïÉÌÌô¾øòå (O¦TUUÍœ9SAA!//Ôp}Œ,ÔÔÔùùù8nýúõ÷îÝ»s玬¬,ÈBjœ>>pY’×'ôú‰E/*‡ àú !\Û@s~¨ñó÷÷ß½{7j6@RàØ±ceee'Nܹs§³³32ïN**ž·oß***ŽÉ¬««»qãÆ­[·ž?.**:þüåË—/jƒt…¦*##ƒ——®Ir!<<õ{+V¬5\¤›Ù=Ú‡sqq Ü¿ÿˆFhú''g~~>êÆÑìojj*d‚GIIÉ—/_Ž ×W^^ŽÞË!!!oÞ¼AÈÔÔÔÆÆÆ¾ ’MóÂ… iiiËÊÊàR$—klÁ‚ÕÕÕ?~üA\äƒ9ÙF}}½µµõ£GüüüŒA™¿ˆƒƒÃòåËUTT´µµ½½½AEII)99ÙÑÑ‘ìR^SSœššª¬¬¼xñb[[Ûõm‹‘‘¹xñ¢‘‘\‡¤Ï•+W6lذwïÞøøxPÀõ@®ÐÓÓ¡#¹¹¹ŠŠŠ aaaPÖôמéTToß¾E3ôè)ÈÌÌäààMFUUÕãÇ“ER“’’.\¸pçÎNNN ''§5m£ì±±±fff>|@.BR¦¥¥eéÒ¥ÉÉÉ111 €ë`Œ ""‚ú täĉÿþûï¥K—,X²ü¬¬¬ŒŒŒ$$$üýýMLL@‘`ÆŒ$[·9--íØ±c·oß–‘‘Yµj•­­­ššÚÐÜÏÏÏÓÓ³¬¬ ºg eš››çÎÛØØøôéS ‚¸>Æ&Û@ ''§»»ûºuë@“ч™™¹¤¤DSS3))iß¾} Ȱ#$$„þýöíÛĉI!Ÿ:¢#GŽ ã[·nµ··¿råÊÜÛÛ=(èžÄ166ŽŠŠjhh ¥¥5\cEEŲ²²”” sçÎÐdô‰‹‹[¼x±““Ó… @aGDDäÍ›7Ëõa±XOOOÔ IJJº¹¹­jc¬J|îܹ÷ïßÃUG²øúú¢×!zG@ ×À¸CYY‡Ãý÷ßììì±±±  É(sëÖ­E‹mÞ¼ùرc 
Æð"**š••5Ê5™Ÿ={F¨YzøðáÝmŒy?þŒrQQ\r¤Icc£¬¬¬‹‹Kaa!¨àú¿,lÃÜܼ¶¶6::eÂÃçNêïïïààj #âââ999£ó[çÏŸßµk—±±ñ‰'233Ç•Î:::¡¡¡Pc4¹ÿ¾µµuVV¨àú—/_²²²¾zõJBBMâââxyyÑܳ˜˜¨1\Lœ81==}D"88ØÙÙÙÒÒÒËËk Wàü èáëéééêêÂõF‚ìØ±}¶@¯‰® ªªª¥¥¥“'OÞ¹s'9vtF¾ÐÑÑ]¾|ÙÈÈ(++ Ô.„……G¨ÚáçÏŸ,XÀÆÆQ^^>nÎÍÍ €N½I+++jjêçÏŸƒ® ' &''ÇÀÀ ==ýäÉ“ Ȩ±hÑ¢K—.íÝ»×ÍÍ Ô†ÝøûûoÙ²ý›‘‘ ¯^½z÷îÝ””” ©aaaÁÎÎ~þüyÀõôɽ{÷lmm—/_>öâË“2—/_Úºu+¨1tøùù«ªª†koÎÎΑ‘‘±±±Ð¥5Ô÷¦¤¤ Ï ‚Ý8X>×ðg¬­­W­ZY‡QƒÝÁÁaãÆ ù°ÀÃÃ3,®oÇŽçÎûøñãéÓ§AÕN<<< ·OääÉ“éééñññ €ëèAAAºººû÷ïßµk¨1:€#$$¤®®1úF##£ Y7PcèÐÓÓ—•• Âõ½}ûVGGu5yyy 㯠fÕ¾MèÓãÖ­[TT{p}‚‚">>^QQ±¨¨››6m²°°×7\®oÍð Ð !4åo¸qãÆŽ;@’ÂÙÙyΜ9¨)\À€qss311‰‰‰5F555Tm===Pcˆ022¨i_jj*šiŽ—••õú¢¬¬ìãÇæææ é<žûp}CeóæÍAAAÖÖÖ Æ(°páÂ+W®€ë:ôôôýw}nnn·oßþñãH÷®]»6sæLФprrrww\À ˜p}£Ã²eËfÍš: ::ºÚÚÚþ¬©««+//ÿöí[íDGGëë냤C|||NNŽ‹‹ Hàú†„‚‚š-Þ½{÷¾}û@‘FJJŠáåË—ªªª ÆP@e¬¯¯ÿý:---’’’îîîË—/ÅúCrròÙ³gAÒÁÍÍ šY¸>€ááØ±c¨ñð𠤤5FMMÍððpp}C„––ö÷e}8NXXØÏÏÏÐÐäêïÞ½£¢¢)H„ììì´´´Çƒ®`àççGsÆ{÷îE¨1Òœ€áÏFO:5 ÀÁÁÔQ£¢¢@‡¡@OOÿk»¾¥K—†„„€8ƒ --M@@t ž={ÆÁÁÁÏÏR¸>€áÇÌÌ,<<\ßH3mÚ´ÜÜ\Ða(PRRöp}NNN .œ1cˆ3Þ½{'!!:wîÜ™={6èàúF„ ìÙ³ti„„„+++YYYAÁAKKÛÜÜÜ9ùýû÷ëׯ———ƒ2ƒ#''gâĉ ‰ðâÅ‹µkׂ®`DfaaILLTWW5F>>¾ŒŒ bpPRRâp¸ÎIGGÇÝ»wSQÁ»u|ýúÕÈÈt ÒÓÓõõõA×0RL›6-&&\ßH#((øñãGp}ƒ†¸¬/---555::d4………ÐI‰€^̬¬¬ €ë)fΜùàÁÐa¤ÊÉÉÍ„ :ËúÜÝÝA“¡PZZ ®Dxñâ…´´4èàúF--­£G‚#Íĉóòò@‡ACIIùóçOt¤²²2&&&,, 4 UUU )ðæÍEEEÐÀõŒ JJJÅÅÅ´´´ ÆÈÁÏÏŸœœ : Åõµ´´ #Ç722¢  MMuu5gbb)H?þóÏ? €ëYÄÄÄ^¼xqÃG!!¡’’Ðað/Q**‚ë»v횟Ÿ2 Àò‘_¾|²>×0®/33\߈ÂËË[QQ: jjj<ÿîÝ»ÚÚZmmmd(”––233ƒ$BQQ‘œœèàúF ‰>€#íúª««A‡ACAAÑÜÜ|ãÆ Pcˆ”••ë#rrrXXX0 HàúF))©ÈÈHÐaDáã㫯¯Í„ ðx|ttô¶mÛ@!òãÇVVVÐÈÎÎæçç\Àˆ#$$TXX:Œì;€ŠŠ’’²ººÊXíú~þü™••¥§§j ‘ÊÊJzzzÐÈË˃žú\Àh ,,\ZZ :Œ4tttàúê™ëê긹¹YXX@!R[[ËÈÈ:………\\\ €ëqÄÅÅ!ÐÈ(ÀÀÀPYY ¤ Ž & ®OMM ¤:555`žI„ââb>>>ÐÀõŒ†ùùóg}}=TúQ¨©©ÑÜ6è08(((ššš$%%AŠ¡ƒÞì444 )ðãÇ ÐÀõŒŒŒŒEEEbbb ÅÈAKK[[[ : Ž &`±Xbè466BMc¡ªªŠtp}£š,++×7¢ÐÓÓCÏACIIÙÜܬ  R ¦¦&Ш©©h.®`” ¥¥…ÞäFÁ· ¹mÐaÐàp8h5\®Š ²%$A}}=èàúFÉõAåуÁ€ëŠåCááá)†ELèœDhll„rW×0J ÙŽºº:ÐaD¡¦¦nnnGyy9 .ZZZhiiAR‹Å211®`4 „ÊF : ަ¦&T@Ða¸\ˆ@"477SRR‚®`4 „ÊFÚZÿüùtX,\ßp^‡èÕ:ˆ×àú†æ/'6|OAÿÇöÂ4,<’2STT§*‰r‘‚j•©—·K ãhMxC²àߣ†¢ýT²â »k\ç䬘ŠX]ÖßmPúòÂîS)„ßBúò:±Þ;d×{Yþ™õiª6”7(8îY«Å—{ï¨ÛµlvŽ.±ÑES×îwTçì+©Û·ÅW0ÐuýV}ýO×cD Ù0’àš›«jëêëÑ‘&\sAqiN~! †•‰ž:îðx|MCcM]]]}×R×ДWTúîËWTdfzV&h[Õ ›p¸úFlyeêúÊ+«1ÔT 44Qä‰^ŠÍ?›°¸ÚúÆUµt4TtPÕó/=~ÑBsKK3ú®o¨ª©£§¥†°‹Ü€^ë¸f\ËOôéAKEIGKÊcÛõ}‹9é{wàÛì{|w—Î_L85¶Ô'Чs’wåÿ Eûà¸Ç{ìw9—êºøUÞ¹]sžÓÊæœ2éký¸CæÎ'‰f¬úÐ\®øíƒ€À‡=Vö ¬P¨»©Ö[F:Óí2Ïà_fϲݻA„n`H9¼oÅ?Þ}ü˜—û¥¬0ÃÆÍÏÄÆÎÂÁMÇÄBÏÌJKGOAA‰L`BoÓ7£ÑZƒxâ‘Òj¤¥²¶¡¦ j«*«ÊKk+Ë*KŠh™˜ù„$$$¤$ §Ø)ò§¼¢¬ÙßrË‹ò14´l<|L¬,\t̬ L,jT(„S7ZÃØ¶¡‹ä—77ÔW6ÔT×V•×”—VW”ÿ(Χ¢Âp ‹‹‰M‘‘bfû1Zðø9ù>},üþµ¢¤ˆšŽŽ‰•“™ƒ‘ƒž½JY0´´ ‚Pÿ³¾7"H#*{#¶©[__[ƒ XU]^ZSQ^U^LCÏÈÍ' &&¦(=y<¨×ƒ5u™ï?}ù’SZ”__]ÉÂÎÊÈÂÉEÇÀLÏÂJCÇ@KKG1uË”^ÁQèúߤߌ«Æ654ÔVµ]Ѝ˜Õ•åµÕ¬\<‚ÂÒ“%%E„Àcô±Pô£*ã}öwôÙ[”ml`åæadál},0210³ÒÐÒ£6Å™ù*ÝäK‚Ô5㰵ئ&ôÜ¡WuMeyõòše8\#€°°¬”¤ˆ?ÈÛIÑêÌÙß¾´ŠÜT_ËÂÁÃÄÆÁŒŠÌÄÂÈÄJMOOMC¹ÃK£OÞú¶‡m¬¯ªª¯«ª­h¹² ÛÐÀÆÍ+(,*;Y\|¢h ³ëûÓ§u¹°MµeðÔÔÕÙÏïy‡Æ¯uo÷lг¿äùKÉÄR4M‰‹óŒ¨ ózz-BxiNÇô—Óó[WnRgéåRöe­Ä–OüÊûÓ“~·ó[Vë"rüñå׬h³|ò n`¨èäô÷)ÉIÅüb“Ñ[H”oÒtè®('P2²°£—`/KÓ‹ sRó?gU”‰HJëjjðòpŽ‘›[ð o2Þ¦¼DͰ ¤Œ€ødN¾‰妡à öFGψì<½ää>VáŠÓRò?e•ä}“ÔÖ˜)"42|•uõOŸ¿Ê~—ŠÇÿ—FdæàUœŽýß š“CóÍèÀÄÞû…—]Õ\œþ¦ çCÑ×4+7}ººì$ñ±g-2rò““ s³9ø&ò‹Oâ•bš¤ 4i`¹ è™Þ^–¢ïàÄOÅyŸ³ >¿Ç66M–S˜­1}:ê?>{“Ò²Þ¾~]^ø_tŸè$^a1.q9tÐ~0Ô´èÀÀÄÒû½ƒ OÒs ?ø–‰x¹)S´ÔU©ÇMñ *òëwŸR“_–äå‘à•ä‘ä‘A‡ŠLƒôŒ½‹\ƒ ±™¹Ÿ>äçdM˜@)«0eÖôið• o×ç¶~ËZ­_ Í6îôBÿ~»$.lßé|‚Q)îk•»n\áÓ­K켟åöºgƒ-7¯YÜ‹9+|¸læÜ€/¿.P¹žq­`õäM]~S%¦â¡6&5#7Ñš‚¢<”ýÛ§QTîijÆë2ÑêÑÍQ‹‹36O÷0ÅÿjzKö[üCbÙä?| ýrqþAǪêÌDóêýœæ>#Œ‚åû"|ö*íU|,†šZrŠêôføWjZaiEt L~oj|z÷QNúkVN®ùFF¼ÜcÏéÅ&½zý"Žž‘IRIWXBCPbÄßTVS$M˜,kÆ¥<ˆËNKFÓ`h /.LN_£Ëªëî?|œû1ƒOXPXa:Œè/RRQñ‹I¡a‹ÿ÷*;5éçÏŸš³f©N!×®ÞQ§÷êݧO75ÔKN™6q²¢òlã‘þQ¶ÉÊ3Ð0™Q\ýéÁãï3E$L ç±³0ÏgoÛc!åMR<55­¤’*êô¦ŠŽô²qñ¡ƒŒº6aòÅÇüOoK¾çJÉ+èéŒ=sBxÁ¥¼x6aÂôÑ! 
.¥ªo:Ò?ÊÊÁƒ2jZ„ÉW_J³ß$~Í––7™§K+dçú~›¡ëñýCœ‰¡Ã?5¾6§›Ú}±™ãvÁ¦çÞWã “÷ŽZ°E ޼¼»¥ó¥Ž»¹LÌ2 ¯ç‰‹#Ÿ?#ÈkW¹É­&³«°‹±³6&½€zÛµû"¤–( .`õ²¤}F‰D îèEŸ¿aû€(µ'–í3{¶{&ñ:™þ»[>•ð¼W ˆö¹˜Á9d9X‹«Hw¶&Œûßò«‹?tå•G,[AÜ oqJˆ}ß¹†ú«»Ÿ òøJ˜<¿b·Í³}m²%}óìýëm»xxn¶-Ü'ÀïiÁãc^¼z÷HJy¦ÌôÙè@j)¤c`"äBš›q~׫ÊJÌ› “U[TäûOSžÊ¨ÍRИ‡¤–Bjº)³ Ð50—o„•ä}]df*)"L"É+üQ}ûö†Ú*UýEןô åÊIJê耎G=KÊz¯9[WSm ^Šx<þy껸Q"ÒS¤U5ŦL'µ¢â™&V­¯º¦†Ó¾ñÍ-K›sq²½g/×ý,1ãe¼ÜÌÙ¤ùXàÖµ\Žäü( ¾~’“›×l19Ååj}öƽL}þDZU‹4_pì¼‚Úæ­U½ «*nŸ:ÃÄÌbaº*<¤îúîm1¸·åk©ø¼~äØQ·³òùíEk_Âöokêx;Ñ •¯‰—zn¿²Åx;R|ê"ñ|SïËn• è׆ÅÞ¤Ó~6€ä÷ܧÍù€…Ý÷{3d@û$FêtöyÉU“Ÿ—›í7Èpã"øIÛùľøØë«J¿;·µXŒè®ÿ¼=”Ö·{Èý¾Kåh¹:ué9çÜ>=Þ”W”蓪ú†k×o¡#SçÌ7´“#ýSQaöïš :¢¢:}޶‰§¹²¦>èúu †FY×ÄPj 鋌Ôû¡#%UáGOÈÈËÏÓû‹éILûøønØ-}u3²¸­DeUС‡ ¾ó ²$ÏÎf)‰Ô‘«oć†×UW©˜,_GúJRÓÐi´Ù¿´ïß“ü|´uç’{Q'Å•µ×¯1±q(jΛ(«Bú fbçÔ[∎øz0 ’ޱá$2¨€ÍÀ¦³Ø‰OŸúìþSsÙI¦ëCËÉiµââŸ[+Eÿ1?ÿxóâzz¨oÏßýÀ»Ü áBh¸D„¸ï³å¦üRñ€vš­ò,´ÿiï¹O1é¡ï³ § ¿§rez™î«N.u‘.¸³­µX²ƒ#©›”ÿ”EÀ"˜).1Û}õgf¬ÜxÍÈŸNÏ3¡Ó]G­‚hÍ@ß~ëuä °´š“n GFf YýeÎ õµSTg,0˜CŠ~¯®Éûè!Q9eBÁÙfAæÚ¬Â65¢"ËN™f¾Àp4Ç?My÷üþ&–†vëÉïõŒÁH«j¢#¾—*J 6mÙúKHjqgNzqð *ë™PN ¿È™|B†v.µ•åè¥8CGwŽ–B¶VÖ]:Z\^…‹¬ûÃÄIòèðéˇkW.[Z/—’!I¿×à{î´Ð$Y “¥ä(2¡ýpAÞ×EV Ò“ ¹h.OË<ºEsé¨fI à„Å ŸôÖ"©Î5&ë¢/E¢ Ô½° ëûáUšJs¿w{ŒæT =É4& ÌžÄ>{"ëàç{/¸³Ý]Øú­q{œ!VàÚÅ~îMwÿ5Ûà í…{1Â]íM/rÁ ôB}#îr` ã§mä~,tô­GÑT_ä„—’ÊT=’É Ö54] d`f"SÓТGz¿cÞ§¤ddæŒFý¨¤ôìGQ¡ªsÙo wµÙ´>í£î×þ(stX>ÊMþqÍ×n¶üü9Ïv 9~ß!†‘•½ëjªyÑš­7c*™•û•×Ô_½|‰ƒ"ê`ÉýªæŒž‹¼üoa‡YZY“N ¨ªú†+W˜ØZ? ’»È\‚¨Èå%…Zh¶XFr¬E ÈÛõýýÚ»ø©^sÕ7vùº•òtÑ'SC×µVy=CÄ­«Ó¿Ä k¯ù,!*Ñ.Š\f™n’8//7''Ÿ¼©Ñ Â6É׸Ûkùþ³Âr!q‡é8?ÛVÅüã>ëýl]½³K“7^ESu4e”ˆÒâ#ŽcôìÞúô#A)2~W€FWC¾övŒ2‡Î¯”†Ûø•;âs²Ò4ÚRRQ™ƒ¢¡§ŸcåT]^|àÐáåvÿüõÿÅ<Ë}Ÿ¡iºŒ U~ãýt-WÔVþ@E¶¶¶±þʪë.úúȨi‘cùÞoPÔš‡Çÿô¹teâD‘ùú£Tc6:.)ãuÒ,³emŽ˜XÐ }yÑ÷ƒ‡¯X±’,Úûáñø °ÈšªJBý½1§ê`×§e¾¹sûöêU+iÿjOå¨È7"”åi.´&÷Ýò¦Ü|¨È9ŸÞEÝñZ³Ú‰¢}‚ë#aÔ6<¨˜¶MãsÂ\¦P\óªHh³7´Ó£Êî¨pvµjóµ’ôµB” –MXs!¬½³‡{¦¨Â–ð™TvÏßÛ®]A/Ÿê±Q ˆ¸¡Ù”¢P¢Ø1é°®¯}š™MùÚ#Í T¹úÎ;Tf}—IëÀíé5ÍöÈ:ÓýÆÊË–>ùDóįGn†r> ?jêýý|'«L'´{0sðڹܽÎÈHge¶è¯¤¡Ó±ŒU‘YÙQ‘=ޤ¤ø¹ÜÊrØ÷õ$á}Ú«Ù–TTc°~:šÕ˜o]ôõÓaÏc«W¯dfb¹ßªihòóõ–Q ´Å{pð é/w¾~ƒŸŸ‘‘>)'õ[QyÐe¿ióÊñM“çBTVIPRúä©3³´g«©(þ•4”W_õ¿0EÛ@JUkLŠ,(!Ã+,yÞ×oÚ45­ª¥×7ê`±EÝg4bûr&»ñej*œs»ŒSâ6Š€¨¼W†èÃÛä5_ÊDÓ¥s…”{W»»,£+ ——©÷œË½ó ÞêÞ>QÃÿÍü|7”КPüسš“J ¹Ùû>C;öé›x§f«,Q€Oßç5j²¸™>½´KʉHe¢"P¥]ñÁK{»©€hЏ· Œ…÷­+>3:ËK5[ŠRõcC`ñ:+çÁ=+G õïvÚ¼EÅßs=¾ÞÅy”?<¿z—ó0y%†šfl‹¬<Û¸¼ðû!Ï#.뜇«›©<þ¼ÿe.BÀÀ1 ¯°×R‘ ~þºsæ*ËH€þôÏß"n]Ó³t¤¦£ÃJ¢.z¦‰Uþç¬cÞ§Ö¯]ME’õî?ù.%5¨c©ôéWÐ7Ë\ëUY‰±ïÞ¿ûÇz´›1Ǿz›;ÇfõXª^ÑKŽƒAŸÞ$œ¿xy•½dlÀõ.Ìs^÷ÖÿAïpüae~u¯ñ)&b°ßÝÇŠ»sX¿Ý'‚<Ãoñ”6<À¤¹Š¦{Þ½e´Ó£úÖöwãƒÈ'/r²2È"dß°À#$¦¹péñcÇÿY±‚‹st~ôΣ¸¯ÙïÇÈ|B³Ìí¼¼¼míì„øx‡¸·ªú†ógÎ)ëès ŠŽõ(©¨æX9½ŒÍË+öÚž÷ã_f¥¾B/űm3:—ffçò óRED=|<,‘Q>Oþò!Ks‘Í8T’š†Ng±½Ï¹uÎ.¤Ð½õµðHŠ ”2jÚãð\°óðOÑ6ð>svƒóZJʬrr÷ac}ƒŠ®ñ8™‰SÍÀÜÛûäè¸k\éÃh´?R´œ¢í;‹ç“gM€qÄë¬O¯“ÆO½—^2‚tôZ‹l|Οߺeó½“b¼ìæl,m3ßä2súTzꦂwq‘N‹ö¶­`ì•þ¯åX·iËHÿêb1Ô´³L—ùûú¢"cØ'÷¿|YnÆl4›8n¯Re]“çw®%½~;Ä0iÙ_ß$Äϱ¿÷;=#ó #Ës.lÙ¸~DÍÆ¹›PUY1ÝÀ|Üž !±&…ê‹×íFªšÉ£¤”¢¼oó—Ž[‘ÑǦüŒÙ.^^»r€ë÷`&iAÇ–Àø¤‹»ÿ_È,s»q®§ÔÔéA·BG"à$*r ÏÕV˧ìù2ʹËñhè/\¹¿ôeÐýO42ŠmÁN(*/-â?™€8…_¥ÞQ&FQ¸ÿXbçLlÊu=Ow*¤ê°ñ¸§Ž8]çÊv>á´7ןùÖ±Š˜Ýù[ëtôÔB‘ÿßÿÖxøGŠ[zäÒV›i„‰Æwç¦ënFTþ p¥:h¶ó]çZüÖþ÷ý”9 -„±ÏÏ,sÞw§s¡æ WŠ>u``a“UÓ ¸qËÞf`9°°èÇLlâã½›U³ûWÏÊHIºw,®9*ôºæ"›qÒ–¯/ظù$ä¦Ü½½ÔÂôo¥ákQù›„§úËÇùU=QzJiþ×Çq ³5§û΋T'ÄDù†¿¸tY^îݘ'†z:í×À8%,2ZT^™‰¤•›ö4ôræûlY)ÉayÆB‹„[o^§lSåÛ†š#óMÆ3Ôµd¥%¹9é¹T­mˆÂkcþú*嘾ÃÑDÁE_œ]B‹YóîæMþ›Ù*´¯vy墥ç_¿ huJ¸üÛ«§Z]^¥{+â„Qå ³Ãè|ã#‰ÿÚ(Œe”Û¼Ý[g]ÛjòÍGƒà°­uP‘×ÿ³Ýµ%äs½x›Ëˆ= »ñTƒ<]È·“â4¹Gyd‚й³<_wnë {À`cÂï/,£ôíCÆÛÌ,EÙþZ¸‚òÊ÷©É†vëàÅ`h”4uo…ÿ7PÛÜIhd´ˆŒ# ;ˆ)>eú“[?~Î$.òWyGi–þØ&ÙOµ £¯œž>M‰Žv˜cG߉ˆP˜©K…Á€ÈòZóî^>9Cu+3#¨®€qGqyUîû #û 9uÇOž ¯ë+þQMÙ¯p=BQ÷9îñèä°í!lj×Rr‹¸³hj¿¼±¶ö²™n?#Ü^Óf³ŽælåiKß®îX^Iø§¼=fkGÉF`ÁÑÐí¨ÓË»œq@#õÈá¶Ù¦“›}ž¶Žâ©©™å$AŸ¾:*>½³o}ÿ¨½âK*†sS¨«ûPUÔ¤^"¬pÁ¹£÷kjí]¡›“[K ”fĬÄvd¸°T, Êòür3´cŸÄôßõÝ¿ÿPFUcœ—Mu"0IácêËÏ_¿‹ tÛҪꜬ4ÃÖƒŒíù`õYÅLÿ 
ÕÞ^¦}hÁ5ó‹M†³€´µû•VV‹ˆŽ±X8œMïÒ²¿TWUL“R…‘¶þKäU5#ïEÛXšƒàúÈŠæ/~[V;zßÿãŠbóv\»zP»—=ܸp·††¦©‘f†…•7Õp%lDv #CìóbrÊ C'Â?[š³s¿JŠ›Èñ ¢²"ãÄ5£ƒÓ^ÿÎÞ_ÿÇjã½&BÈãòE½²Q÷Ýù¸¸å7–ÒŸ‘ðôytä%§c{;Úû0å>š%ÓÒŠsK±8G¨„9MLTF’¨oNáÓA‹ÛD ~pÕ!¸>ÒéX¡Ï\FÝ›Ûg.¿jä$„̨«ÃK¯’–7e㢘@™•ýEZòϽ/ü¨®Íÿ’=EÇ.ÎN&)L‹ñB\xÀU‘Ÿ§æÕ-OtDpÕÝwmÒ“ûü˜óÑ®g»ß×k1õ6“ƒSHüí‹'9ßòÅ& ŒpªJþ;øÛUû91ù ï7ŸSí÷;#Ÿðd™IÒ2lôãôª–Ÿ}ùÎhS{P‰Ý5AxúÖfxž/v–ŒµS_^Þö_‰ûÿì<•ßÇïÍÈ&#{“M×ÈV()J{ŠH)¥Òø£ùkÒ.šJCQF%£¬PFvŠŒ¬ŒDö¸ÿsïE’dg|߯çŹÏsžsÏý>çyžó9ã{Ø5ëI’ì¥_ÜZÅÖ*uE„s{ï¦.åòN{¿7ãìžy©ë,•ÖYº‘Pop7f:õ<&­L^µuÀXܱ éVjâ¤g?¶ÂóäB@k¡#«Éb—7÷1oƒx´9ÛF”V…í›ïD5cÙÍ]Tzau &æá™(¿®µ5½$äØÏ¬âiõì=ôº:—OT"19±'ª/:î=—X?;úËã#¿`°õ,rú2|ý+²pR:”°‹HÆ…677÷ÖI*Š3™ÆVðj·©sûGeâ ôTu¥yÉÑοôl;Ða81†¼u ÞÄñï>æð”g“û£'«ÞnUwâ¶žŽÂ­Jõ÷$D%]õÕ¥]µÚîß³¸<Æ×_X2Ö†$’“Spð ¾}Ÿ¢¦ÈOEOÜÅ2¾?Fhhl*Èþ4yšÁc4•¤G½ILý0š<*wÿ¦CRa1ôPÕVWú¨¾‘@åË¿H>…{ùïÿæÔ­"bïCí?_›kgZzl:Kv´_BUCâµÅmÚŒÐø,¨‘¹ªªEBCS¬C£fm~tHxdL\zfa-±ÆÌ)$ÓÒÔÑûµ¯wÉjy_㣂_…Æ·¦Œ§æ”›ª?}šÊ¤n65%ÞDFÇÄÇfg~+«­¡¡¡¡fæQPŸ¦©,Í Eè-ùÅeÍõƒÿéÉ!›ƒè-2gƒjûKä{Ñ{_ŸRx„8Ÿ$UUÿ>Ÿ•‹?>ôÅ@}kAé·¦†zzÁÅO¿à®[ÌuñÙ©C˜×÷Ú¶ž®ŽÂ4x žÉôØ -ÛôKú¸K?µHÿ^¿ú`®¢6±ê¥‡V2kyL¯-#øz©hÛñØ”÷×'Š‚SÀƒ Ôx ß’ë!Âr+çîÙ&ß9‘ÍN7¹HU­¦:â¿¿tõuÜI¯yñÍÕª«÷ªàöþö¼ªoèÆ,,\¼ï‚{bÀ/99ìýs¶Uº]jVXëÜ©¸ÈŸB·gµG?3¦½>¸KéÊèÀ3-÷+]XÓHÅøÏ¦Â¢ú1#3[FVnOdó¯÷{=s‡ûýGàþVÉ×:™óœ ½µ–³Aw¿×Úµ||¿©\‚ê«+J‰}ûþë ~<¯äd¹ Òi¬Í/(¬A^Ñ Ò¯-É)¬h?TU”S^sål2¡d}/Ê)ÊÁÔ`˜yùé;œõísôÛ¨„ŠºñTJZN¾I2ââ\?7U~/Ï-n½$„ÉËrrª*ëÉ:íì2 _bdÈ^½©¡ß^ËL~ŸðNÍ`q§÷å\' ƒ·Wþní¦ŠœÄäY¹ÕSO–”™ü§F·º²ŒÔ÷ Ë«(Æcñôœ‚â¢2¢ô]+ŸŠÌĸ¼ü¢o?I…¥Ä…;¾¾åeW6b1äùù:øoy:TU”‘–ú¾¤¨ºK¸RÂ8qÞ®_4e!_«°èšÖՒדe«)Œs½¤Œ,F–‰d]ùdn̰@q5ÑÖ_<*7æ¸MQÙ,¾1Ðãj˜‘Á_ÞtìÜ|ÙÙ™P} úF5U?~ù›Ÿ‹áâë‹Iý ð·“¿y혳-¬óÞgN+ž9šöá¡Y ]v;ê·.´zúá¬!E/’%Ät])·áv^— þçÔ8÷¦ÂR¥ã34ÿ8‡„¿šFáfzØJ1èµzJNAÑ/UÀî™4›ãöŽT¨ÀV¼²Ñ6½ì»uÓœY/bLCÜÍ]ÿÛu©cçÑ·?~Ê^œX¢«ŽjÍvĈÙ¿²oOªÓÎÆâ°‹Ûw^zß! ¡»Nmߢ×Z¸ÿæ|’øRf¿`ÁËœ¶ÏÜóÍM{ZG§eeÿ^^:PFÎÍ/l52Ù¤5WÒ{²$6•è¦øÂMÝD`S·‹/´ûk:ò[Ý~ïŒjÓ;LJ[Ÿv7Ÿ“^ññ…ÿë~'…€ÉB“>˜…Žybå·²žôVU|+SRïûÀæ_6$H>ùý;&\5µ¹¿M~³w‘ ÿ¯º¯äí5§ÍVa9?÷,<ä¿sí´º¶ÑŒL¼¹8¡Š½Ù÷åA±“Qy»P7‹šõ³boø—}ˆ]ÒÓÖ؃Û[˜Y'æôJõå2üñ~w÷}½wËt¾ßJ ­Éíš?]à8g-I3òã$±ñ©ÙíGæ{乓:™?\³XAh3úÕ'-‘äkë~R%{b!ÑÞKœæ±ÖȃØáS´V‘¡1ÿÉjEÓv/²¼ª˜ao~¾wû™(2T%_2˜íø³½ÀËÂÈë·F„¶¤” E‘•£¢ìë?{‹¿!Ýþ›µ™æYl9H°izX\Õ.5’N‹;?wÑP­ozÝ™µ™/Ú­-¿ËßmË´¶‚÷á„îN»×^t]¿¥&þ¼0 ÜÒ÷ê·^嘓ºy’Èêp—{d^!”÷ú¨Sˆ-& N¡¾ÛémLéâPU˜õ¤™?kC2ššì•aíóY—’¼”[Ýÿb ö¬:Óž]ÑA/>`0V´ŠdQQ1s×ãC3Þ^RÝ{TnëÕ'\ŽËÓq—1Ñã¡¡–ô]ÜΘ9»¼÷´½¡êΨÎÞƒQ´?·±ÙjÃÑŸ¯ÂiögÜì s¤ÈÍrßÞt ,S㢠þªo„À1ËbfãŸ;çð·=æ…µæëÎÒÖÕP›,..ÈFóûЀ å/Æ`ÂÏZ|&©}ï† ‡ôÙª«h¹‘¬<¯Okõ¬ÃIj^íŸDH,눖ðž¶çÙçssÅ0O³Îö,Yô$xm@=­ãùÇÃÙ’j3ßnÛÌYy&’´ßJ•Éïx‚¿-iE¦oG´:J¾©W_ß\¯Õ:ö¬¦ðÍ5çøV¼Jœ¦4¶v›<w 'Tý¨¢¢é±//ú.")‘¤îZ‚ƒ´öw¤¸­ß+G.â›=ó±ÅBËC¦þþç’"ÕÛ^öÉ™¶™Ÿq`ŠÕB·ªÆþг?g¾Õ¥žQÕ݃Á¸› c¼‹®üæ^ÕÍ‹¤í±Cì#ëQ¯ [? =ØŸ›Ëè]J @ÂEn“|Ë=ò®Úê|åå®XúxúoàÙ‹^p´]?{Åõ–+`îÿÅ£2³& FõÜ\ÓC„&ž­+UD:àˆ·ó總&âÄ W°CÇïQÍKé„ÖÏF q@Ä»CVÕVmÞ‰ÎÏZ(ìq,.A¯mxZ?Ý,÷íMGAËPWýê? 
úF 4ë=ñKS=V.}”Õ¹ù&ô1Ú®üºSØÚ忉Íê¤7 ºùÙ9ˆ®êZáäaccakm¢°|Ò`ÖØXÝЀilllÀÐr³µ¡ÚýòÅsª™¡m'~ŽM­À2õ(ÙšÛfË;J¾å×sn¯noX°ât„4¿ž¼Mk£Î³ n~X)FhÿµæùúÖm/1v!.¤ji8UÏÄáÏ@éúDKSS/æÅîÓâÜ×Õå·Ò¯ÈÐêm蹦2;âõóÇ÷–W|¯¯ÃÔ‘f¬Å'}*UgíÁ4WZ¢Gù,GG™Y«çÌUQW×=QcäÜZ1zôWç“j-/Ü/Bôž^ÛÖ·@Á=ÏíÙ~Õý~däMb䦦–¡^]ˆÇ5k†}ñCFFÆù{)mi&ë«×õÌ[+ìn}!t=…nŸ€'ô\ÝMý¤$y't‹Õ)·­„ÚÕÛóÄæüYnîö­ƒñLFÎŒÚ'»µUíÊ« Û´#§WzÚÜŠ·Zu6ÔÏŠžàQ+Â0EÜ©çNl˜oî½p—Ú›7:e}u}¯Ninj÷›‹ åO }+U_âßù†úû¾O ¾ƒ¶ŽÑ4lœÚ9§ÓÉòvW:J>„¨®† ú0å%V­Œ»Ý:v@ÞîÀ/’A¥²åôâ0›û(èû0Ƹ¯Ó,ÉÈ(››‡ªàû©Pt×*#1ßÙùÂ6Ò­Šö"J>„´t}ÄsŸe’‚Sƒ!X;ýŒKÒæiè!LÆÈ‚¤2WøV1ÜVŒÂBK=­é * |¼¬zŽOô:h°ëDɇХåª_œPIno€ïÝûàÛVfØÕ±™Ï^½ÿ™Y : YUL,AÊ…¹?|«l¡ØYcŸ•x[!7Ž¢¥¹Y×á¤þjþý6tss3åxÚ®1ëºÖôÄ£r¶­‰‡øò:qŒX[%†8{ù¸ïçþKIa/_}Rž'Ò–ˆ¦›×mÞ)§Ùß³~Çïò&륪Þz¾¶;¨n–ûú¦C¥½¥¹€êIÊOÒÔ3³µ=£±"?99&2ðUHHØ£Ðøßßû.Ö.Ö˜ý¯K´zæ[…‚âsà%+kÿ¬¿ÅüÞ‹V§Ì¤/?»¯áwï®^–™_TCµ Ë쬞ý¬m‡¹Úh¹ÚtŠ={é‘Ý·h ÂðN PŽßÔÔcm3É)ôõöîÃëCI£Ý0ÜóžÞ1S”šš‰úÇË,Ÿ©bv“ ™üÄY@%bb^¿xýü?ó#Ùí/H7É8Ÿlh]ï®®Ó¼²^ô755RQ Œ§ J*ʦ†(l]¹ºî@ÈÉ)šÈÆ÷úá†Ô×B;bð±)GçùOqǦ;Kgn™ÎÙæ›¤ºÏ?dÚqß…·pžq;Ýg_õÝi.¡ÊµÔÍ“Ô×Ñw©½±^CÃø^:SOIÙÜôÇ¢HσÓ[…¶}NvÖ} :¹ÀÐ3~ÚdŠßoO€º!yLQµ6|ªkHÿ~”[Vƒ!¨>FÊþE*ÊñCUð§†V¿Ê}öt<£ˆÜRAÕšKª¦ãœÂ_„4þ§ZäÃÄ»ï<ÏÚEÿ&Ãxù1ØÏyÙ¬m•~“§…&e)×¼|/ÆzžGÛ/bæÅ3cY %¾õ~`•d8Ç%Ô,b$Š)>wÊÖï·ã‚¼ü˜¼œÏT”¤+ÕzY§LøÙ`ÕRGN.å€Te())›ºmù«Ge‰f„‚–¨.‹?WtW³$^¾nDzéf¹o:|s¬^ªoCÁÄÓ0F›åÿ:ê±7[µÛ‡>p´pµJ±û« àŠ·Ç'LéäkAxÅ–ü ô¬ ?BWÚ¸ö-›ŒkºfftV[V†)/Æ`mÀLê{ñø½¥±7Ï]ºíîšÙEügww£ eõJRòiá ôˆ‰¬ÌÉ q•Z]ÎGâäM·PwyRM[ñ`ÇN’äk]‡€Ôøû8±Ô¶µÙ[âéø³a2çű“§nâu.œµ›f 6ÅÀŒÐÞјztŠ®c\à›ª­jºu>‰§‘ÔPÁDEÅ9_H·m÷`™eçžžVkkÆaÇÑÓLmˆƒ•%1¶b˜—„ž;8(šjQÕ–žö¯1iè~|+cæàéÝ|{fN¬IO?w|¹x§ƒ¤‘Q7V̈NГ1œƒqôÅ„?àq µ;¤$À\oÕ- Fè\z²zëõG-¡"ÖU÷G‹ØÎ7WŽ^«8£SÄÄg>ý,Šu?°ã°´4CØfÚ€abb*iõÜ‹¡WÚÿQq½èÜXLü> †‡\Òд‰QÜ©Ç=õBDÁ"e`um?ÎùÏö+.x‰‰wœÉñͧh-Ž¥õ./õK(´ÿ5Ù†ÌèðïÌŒ4¸Äù©°5íå¥ó¯øy¨õJѵ^¶ß£veƒÖ«ï·¢ÍYquE#Cã×Äø03Ó„ìœì.s%ôÌ£2žœ¨@kêng­Å†.o|âŽíˆXþsDwÉ Ò”TÍ™RRúŹq]ü RœiÒè‘ÑÖöÔ7Ë}}ÓÕ}/§e`‚ú¨¾Àï’ì\R­e—"‡Qõðõÿœ'wˆŒýc“¾½Ù£æÎ_ÒÇÙ‡ÇüéH ûá«_¿¥»ÜvH–Xëýux¹¦ÓË{{1•Ÿ‚Uaýÿ®¢í—ÇSɇ»‡¶tX·0ÓlïÝeÞki ¼=@Bˆß«´ßÒŒ×ÝĆúB¿òúˬ5Üìà¶Nøgœ±ûöv슓O1g 9Îböm9`èèàßÑÿ$¯ñ_FÀÇïÛåg-ǹ;¸œÑÇu¸,´ÿÄzTCêóIy[Ÿc•ÚvW~ó`I¬ƒ6þÍ,ßKòXÙÌ)®˜ ¯gyisSS—®ä† =up:P|/úÂJß­ÂÁù­¸ wª¯9e§äBsƒñµÃ¿I>—ÁÅsfVW²ì”Wóå¹;§òI^ºdÔÁi*á!}.ÎO=ä±Â"¼˜°¼ÖB»ôA*±_ðk'¥Ë=Ïø,Üê“J˜èjuùÈŒvyØ#w©½§´¸@O[«W§HŠ<õ,Æã[Z—Á çæÃÆØ=ñ¢{VcÓ5[ëüŒ2„­Nqnç*ÒKÓY½/Âüêz¤þŸË›fD»´NbÄVøíœ¹÷VRçØxzÒK5Î;¤jk«/aÓƒkNÝ¿ž‡I¿>o=ÕËû Ú³s~n›?«Ý›{;"±µ"ýµu"×ßoü¼Ó’~¦›îZ”sݘßå &ÜQ w¿ÍßU‹Øúóvw a0ñÛä¥vû¾6Qh²„­دgG¨É½1Ð÷j˜qö±ã|IEË+s­Êχ ßDÒœ 7#†Œa»Ç_‚ûV”¬šSpàœ¶IÔ‡Ø%šTóR䃕uÒÈÌŒ}§8µ k¬J½f®kÕi—ás™Ë—’Wʰââ­ «~ºìÊp7ŸjFŒ/t)=Y™#¿ÒAáàÌXÂðTýƒÜq{‰÷iù׆ºDq~çÖVÅ_1L}½GDyâßv5?pÂì§_zäQƒa%øONo}8ØÞ÷cw00=ÖÙ…2×2·äYÉëïÎ7ÞÏ\!Ã0Pn–ûö¦+ûZÀÎÉ@õ ˜”vx™9wð®‰n<+ fÅŽÓógªŠò°ÓŽÇTË{åëºíxè¯çâŒ5ÿÔÑ·çôõS-HGXQÝâçPÌø›îÑÖêÊ1¥±{(þ×)Ñ7Ïb+ìt™þž,zÌXÞ~zun{ÇJؾ)òÙn/\×’"„ŸàÖÜÑála¿¯ŸôÙ0ñgTåm~z[­sv]Öþ…4lbÓg*b:¬V“Éôô¡@NÎÆÍ[˜ý‰KH¬›hòÛÞÆoëArxZ£+5F]X°ür{Ìâ´Gà^sû/sÒþê|’0ÑÂ!FÏ¡fÉÿœ),":PF&##›ÈÃ_”ó‘[Xbh.ë·øk,~qGÉ£m{ØÅQ¦Í}cœ_>í‘‘'‰NЉ‰–›Ò£K‘Æ wƒºÃÇøöÒ"°ñqÍÆ?—Ië˜ëŽ{ºˆ,¼ø~üâŸå¼w%¶—4|/Å´`¸{Yu£¤ `aç*ÎÍâàoz$¾Á'Þ,ßï íÞsOÂ/¬E[×ÍC³NÛ ·—‘ÖÕ;~ѽ:D?y×mÇøû}0…® ù:Œ”á\ÌU×n=¡u§´Éh¢˜¡”™Žñ Ä`>Òâ$¬½DZ¸ÙßÀwÏ gb/.šr±s{ÐF7os}‘Ÿ6ï2{Î3±(þÂ3 EmÕq”)Á'U•µÕUÔ´m£€ðlk×È´ Ÿ¯ÃyÈ)¢lŽ5“âø¢õ¬çytdï‘Îéâv{yš^‹˜SQÊþ…{¯Dž5>ÛEkR&®òÄ*¦iGâ?›œ˜«~')~ŸK§Û«Ý·è žiÍÍi“f¾Ä`|·ÈûnùgΑÛZOW\"Ââ*,†ž0o–mc@Íü··ÎÝžêe¡ãeѹ uœ²H¯éZœz}™¤K æÑùGª?¸åvf)ǮģŠQh^5fR_û¥¸¹ðÍ-UßÊè'ü6»§Ç•Ù¡ ±ú„iâ ôàTM·ôÝ觛徽éò?RS)«Pc[õ!Œ.©ÞrCSjMÇqi·ÛÜ>ÞÍIÂçÞÄv\ AHYƒéà ÷ɦ‰X¢ŸtÉ£_C/Êß6ü™ø—K*ì› Î]²ÚÔþ8}=}aðõΗ„Å»K6ÅŽÅ0¶)ÍNIâ¿¶± ñnë&º­ë"¿ªG¿F¶FÅm}G¿T~½GÛ)Ë'¸ýÑ«± ©{ôAu(ë@Ï‘‘Å%%%v¯úÆ-Í9ÓÍ3À$e'ãâcß …êkwGɹþVº‹ iµŒ”;æÓ7¬”9AëØO§Coä)rRÏ|k~|§¡c„âIâsZ¢˜”tN””–ý˜žØ®úHšÖÀÞÃÀ¾‰t¹¤G7‡¤7ÜßÐuRz…‡­Å2ýi¡á9‡ã;EîMöþ˜gbQ\8WЯÃŒXü/‚âR9é‰â ¿¼²å-ƒã-»hw0qŽ6qîIÓ“C Aw2 C¨p¶5¶7¨æÛPV/Øø[ªlJw -zòý˜Žm|õ_CŸyí°!霣—Âê"ÈÈï¥UuþÉ}Ú͸’åf¹©¦â{yÙd) ªo¤@#¹:¿špÛ”|ŽŠ ›õ±$³øé.ÂÓqp°‰J+Ê«MQQW™ÄÖE×›öI<Þ)úÁéãç>Wÿ 
CÐNQÕ›ÏÄ"ŠÍq7¯Üô ûAlj),ÂpHÍZ²z½©±WÝrµÍý3§Æ2ÃÌ#(®`"Ëø—d[›—ÄÅáa0Q>~Ï^F¼Mɪæ¦-Ê*æ”Rœ6ÇÈØÇݹ7·î.~Ý]Lm~X€PÈÛä韋ZÐÑrðËK+«Î4ÐÓ˜@¥è-J“#^W–e`žÖ@d'FO’’¡¦Èɱj8™ˆWÁß¾N˜È9¨™¯Šw#­.¥9_4ížË{,á™HII)ÄIÍÁÞó«ZÁѧ Zzid9%•ŒØ7“§Î‚òI¬+7~Ly¿}û¶>œª­¦R]õ–~‘›òNTR–†ú8B›©­uñâE1yÕÖ·cžÔ¸h%eÕMs–Žæ©S§%”´†r€==q‘îÅù¿r³ü!.J~ ¬ÏªodBÁ&¨i„¶•}Ê&öž&]·pRpɯÿÚþp*µàâÝ.‹{Ÿl;L“TV Í¦7ù¥æÖœ·mP”ešîŒ·A†¦` Lc]rltßêÓÝ£3]/2fIKK§>E~Ò_a2ÿäifNž ƒç0Lʩ裌µÎ‘ë‹p×=4õ? U> ¾¿Nûmä:zzu¥^YF\äÓçÏ ¯Ÿåqžãšê_ù>2·°èg:k–/9qò*ŠtŒc·ï47)šrüx-µ)ÿ6+V¬ºyóºÞŠMdÝ:RÝÄùÈÉ+ ñqRúkV­¼xñ¢Þrs ŠñcÖÈ)áÏ…DŤ&‰@…Tcûé@Nn½yÓgg-£cs]~ÊÛ²â¢u«– ÞW‘‘Y[[ž>uZ}î’AlÚ'8~øÐµãÒØÎþ98í_RÞ–-^»²F6ž=㎧wÊ›`©ä›áß‚miôs¿¼|Õ*f¦þzµ¡  °´´:{ÖEgñºŸ>$ÇEé EyÙfkWÿóœpq°/4yrçŠÞ23ì˜~H022ÌÔÖ¼¯@·Ì²å+=î^™±Ìœœ|,.Pž2nÙÜYÓ¡¶ª€0ÿÁÜbÓ¥ ´MÖŽµŠ`駤ìÌ K³õƒýET””¨¶}îüÙ© VŽ5w%ó²>Ylè×Íe ®¹{|ˆ‹“[žÇÉðÍÏîº/\ÌÇ50èi×›™»¹^žnjF9žzLó[NÚÇ´Ä͇I~Ä…jfÎ ö¼¡k²n¬½w2ß½Âb0ƃÞ/ÀË5oþßûn3L7Œ5uó>²¦ºrõÒ%PÏÕÐ 3#ƒ­íö3Îgå¦Îdç#¿:-â%ß<’¯½¶½ÓÖöÌÙó’ÊSÇ΂)áÏɰØ~J>k—›zù=yá5eæXñI€¯©|zÿÚú æì¬ÙR€RÛºe‹³ËY噯ÌóõkÈGTÁà`cݵsGbz†ï Uý…ƒ¸ÖËÐ={뽯:Ëà”þy[[;ÌLŒèËÈÌö¼î‡øÉ)LÙlaö³±Êt1úû<èuBì;Ù#¥–ŒÅ7§E…äf~\¾|¹æî]Ã!KÌŒ »‰ÚïÖíët ä¦Í)óýš*¿¾öÂ+ ¸vÅÒÑQý$Ì¿g—]N^Ç­s|bÒJš#e*ZU~fD ¿²†ÖÖÍVÃ<«üÜ\Hûå}½ã~‘KPTBU{¤¸‰ª)Î}á;YQÙzÓF €ê`e‰Åz‚óýäô ß[7ùÅe'É« Ó1uu•QAþÍÍÍKL© íH¹~BEI¹~õrø˜óØý"· ¨˜’&åøa˜U|õ·È ÿqdd¦‹©ïÜ1Lr5Kw*Úb¹÷àn #¯m0<½´c1˜òÜô7Á/”T4–Ã"í·•8ÿ*:.Á?ไ¢º€”üðt1‚m¬Ž ü^^f²hÑöÑ8‰Ÿ—k×Îèö,øURì;œ¶>¿ÈðÌ*z,„¿ð¡¢¦Y¼p¾¦ÂÎddnމ;‰/‹!a±ÑršzÜÂÃÔÈ5•o‚|±ìb“…jÃæÙ €ê`"->IZœð¦yŸšþ2à-=£¬Ö¬Þ1…%̉úô‚™…ÍpÎlóukF´‘EøI3»22³ýŸø‘QPNÖšEÏÌúos5‹)Ïˈy9‘‹{®>n¸ö "ŲiaÍ뢒R_?Ïò²RÜԙá÷o¾!=&üSj’‚ŠºŽ†ŠöÜð/ŠÊò“ц$GTìûÐ@>!)5á0Œ¶±¢82ПŒŒ\ŽÁše£E222²93tÑÖÐØøòUX»ha)yQEõÞ1…ž½ß¾´?foáýN3µ5ÑÖØØÁ/.#¦¤INNñÏ\]œøœô‚#=ßTC„œ¤8iÁ¥Êªê°¨7‰qï&L䘄S²EŸ±-M¥Ù o#ÈÉÈUTÕ•å¤Ô'KŽ2#O˜´Ùjëê"bÞÅFGÑ22‰áTYy†¦ãÛÔX˜•šøö -š†º&NzšâˆYŠƒuýê(ÐÔÔŸó&…ÅäU9…Ćl¤¶þGz|ô§´$^AQí©&ó 0h’C}Š<ÚP¸¸ôÛëðÀ©ÉÜB“Dp*tŒÌC” |Ë¢œ„舚UŠÊªªJ8¥í òŸ@IAa0Cm(œ•—ÿ:$´07GPBVh²òxª!rf‹Å7ÍL}ÿ. eFEM]c²Ôz,ô ’üCáÜ‚ÂW¯"¾|þÄ/&-$7eÈ&8`[šËò2ÞGGb±˜)*êSp2jrÕÀ?„ž¶½ÒÜÜü!+;>.!'ë#ã~A6nA¦‰œPÉn¬ÿV˜[›…Þ¾ä”"bâ*J rj ºj cÁÈÔTTÓµ4ÐFú˜•›ÿ66.ûc%%;¯àD^A&v^òþû®h¨.ýò9?7» ;‹†Ž^T\BEQ^NKe––ʈ¶99¹ºm¤ùE_£ß½û˜–:Žœ‚WH”•[`ßÀô™ÔWå~.ÌýœŸÅÌÊ.-#£$/+g4ƒ¶Ñ;ë#CŒa`jccc\bjbâû₼‰\|ì|‚¬|B´ôDãÕ”}ÉÎĮ́­þ!(*¦¨HXbB'‰ÚâåZfJ ×ÔÖFžOMNú^^Ê% 2‘‡•Gd<Íè@lKÓ÷ÂܼìœOX2,áÙ«  §1e†Æ”±`d>.ΕK¶ù]|RrRRyi1'¿3Ÿ0õ@,l‹„teñ—¼,dä–æÑIâÊSå”åu”塜 úŽ‘‘IŠ £­ãÎêšÚOŸ³ó¾””~ý^^^]õíOMK9žŠŠŠ;nÜ8r2L ¾¥iÆæúÚš†ú†Úš* ÊñôŒ˜™Y&NœÈ/À+ÀÍEÁ+‹™" F&Ôöø¸Ñö‹^klÌÌþ‚øZRò­¬ôGÕ÷ÆúzŠñãÇSQSŒŒ%'£@ni!˜¹®®¦©¾¾¡¡žš†žŽ‰‘……mâD6A~^~>. 
+FMit[›câü9ú´µ*sŸ²órsóJJ¾–—•ÖT}'#§ ¥g@Et<55y}]}YY)7/ßÒPW‡6dÚªJ2 r&d?6!>öÉü1å1R)((”äÐÖqgÁׯYÙ¹ùù…å¥%H bFCÇ0žŠÝî”ÔÔã°Øâ¢"ZzzZZT‰÷{=28)ÚfBaäããáçgàÅÈŠb03à~ï 4ÔÔ¤µLÚ÷ ÇiNAQvNnQQQyIIeEyss =#5õxJ* **,v\}m-%a-o<:TW[ƒÕ•ßQ˜Ž •kvv~!^j.)Œ‚cFÖR›‚¶ŽFþRTŒ <2r2ò·²æ¦&†ñã‰={±Øúú: JJ,ÓB0rmc}]uUeSS#ºÌ,¬l„Ï+"ÀKÃ)Ž™,ŽÁèCa@õ0R¡¥¡–“’@˜bð ¤ D˜¢o•9Y‰IhûS;;»ÿþûǃ­þ ×ĉhëî@K+%%¶TÈÈȼÜÝÄaggsõÙÈüÜ\hë&އ‡‡²²2˜ ÕÃfff0ÂÒÒÒFÔÕÕ100€ÛÈôôô`T wXYYÁHSSa8P__ÏÈÈvl#ÓÑÑP}0ÜáççGsssùøøÀý„8Ó©ìðÏ),,¤"¦<ðx|ee%777˜ÕÃÒ(8sss°FÿU_mm-ØáŸ“’’jd°ùôé333˜Õà "aaa úúOKKKMM ØáŸ“šš* vT222888À¨>PRR’““GEE)úOssó?Àÿœ·oßâp8°Ã '..v@õÀ€ŠŠŠŒŒ,//¯¢¢‚‰‰ Ògªªª0ÄAž™™™ÂÂÂ`HTT”™™ØaP‰ŽŽž>}:ØÕ#Cõ!¡ÂÀÀpýúu0HŸÉÍÍ¥¡¡7n\xx8¨¾Hyyyaa¡¦¦&˜bPyÿþ½““ØÕ#ZZZÒ¸Ä .€ê묬¬ÕÕÕ/_¾\µjä_qýúõ©S§‚•´´´††F €ê€‘ª½IJJ"áçéé¹páB°IßHOOçççONN~õêXãâáá±cǰàrçÎÞ €ê€‘ó%¢­­ššzôèQP}}æãÇ222ÑÑÑôôô)))RRR`“¡'???33sñâÅ`ŠAÅßßÿÀ`TŒÆ¿hÑ"###iiiggç-[¶€Mú@RR’›››¾¾¾‹‹ËåË—Á&CºàÇe°yûömIIɼyóÀ¨>1PQQ±°°ððð¬ZµÊÊÊjýúõ´´´`–ÞòñãG555]]ÝM›6êzÊË˽¼¼¾}û¦TNœ8±fͰªFHãUTT ±wëÖ-‡Y³f………YzEZZÏ\\\¼¼¼•••RRRW¯^ݰaXf(±²²Bz›’’L1x”••ùùùݼyL€ê€‘==}ii©¥¥¥““ÓéÓ§ß½{gmmíââ–é9/^¼PTTD ‰¤¤$GGG¤¢Aõ %‘‘‘¯_¿ÎÏÏS ¶´Þ¼y3˜Õ# &&¦òòr@bïÀÞÞÞRRRwïÞ]ºt)§‡„††jii¡€¬¬ìƒ´µµÙÙÙoÞ¼ K8 Hf_¸pì0¨dgg¿xñ‚ô¸P}0ÂT_qq1 ìÙ³‡——7<<<99YPPœœÜÄÄìÓCÕwüøqPSS#ù6tuuÕÔÔ\¹r%‹û äSWWÿ"ƒ ²ð¹sçÀ¨>y°²²–””ÂÎÎÎæææHõeff !á7þ|0Q÷„……!å,,,ŒÂRRRÕÕÕHEKJJ"ÓYZZBÔ`sïÞ½7oÞ¤¤¤€)•ƒòððÀTŒHØÙÙ¿|ùB #¡ríÚµ888dggËÈÈdeeÙÚÚ‚•ºáÖ­[†††í‘ð B•ã«W¯rpp,[¶L]]¬4H ±gnnÓù›øøø3g΂)P}0"áââŠmÿèííÍÆÆ¶dÉ ‰äääÅ‹#s÷î]0ÔŸxòäÉëׯÛ?"Lê ÐÖÖ...&'‡êÊÀSTT¤¦¦†J)¬52¨ÔÕÕéèèDDDPPP€5P}0"áãã#Íëk}­’“ûøøL›6­  €ŒŒìþýûçÏŸHMM¥¡¡suâÑ£GÜÜÜH!·ï™={¶……),++»{÷î©S§¢3Øj`A…V\\<22’——¬1¨ÈÉÉ={VRRL€ê€‘ŠÐׯ_;îÑÐа··WWWŠŠB---/^Œ¢¹¸¸€—N8;;#ûtÜ£££ƒI~~>Rƒè£­­íÇW¬Xqûöm0×@‘™™)//Ÿœœ ’o°QRR²°°X¾|9˜Õ#QQÑvo.íX[[øðaÁ‚=Â=¾YYY999ÅÆÆÂBØ$êHOO_¿~}§ýZZZwîÜÙ¹s'éãåË— ¶mÛvêÔ)0ZÿyþüùÊ•+?þÌÌÌ ÖlÉgll¼uëV0ªF6HÂ111eff’¼P¶sþüùµk×._¾ÜÝÝ´çܹs•••âââ«V­"­O0ÆÙµk×öíÛß¿téR$ðÚUÂÏÏoΜ9666§OŸ»õ[[Û7oÞtê</""²wïÞ5kÖ€5P}0àææNMMí¤ú×®][·nÝüùó?~LÚÃÀÀ••8a„;wîèëëY£ÅÅŽ{÷Î××÷÷CK–,Ù¸qcAAWûNsÑ¢E0Ô³Ï444ÈÈȬ\¹&I6ÅÅÅOž<ÑÔÔk ú`”€ô^rrrÇåÚqsssttTVVŽŽŽnß9}úôo߾ݽ{‹Å†„„L›6m  éºcÇŽýéè‚ N:uâĉŽ;>|¸ÿþNÆzÂãÇÍÍÍ988Àƒ {fffÙÙÙ ` TŒ$%%‘êûÓQ$TÙØØ>|øÐq&ÕR"7oÞD;Ñß.EãhåüùóãÇ_µjÕŸ"ìܹSCC£“êC =uêTdpNNN({¥ªª ‡Ã-^¼Fuúúútttú¨>%ÈËËûûûw_LOO»}ûö¬Y³:ZE$<<ÉÂmÛ¶íÞ½{Ô›«ººzÏž=Ÿ>}ê&ޏ¸¸¬¬ì•+WÌÌÌ:ÒÕÕÍËËC`ÒT÷X[[#…LEEÖTRSS544Ðs@EE¬€ê€QˆººzFFF÷qXXXJJJJ9räHÇEÉI ú":úýûwmmmè8¥m”1uêÔ“'O"•Û}4'''SSÓßU‚††&77EMLL¤¦¦†BØ ˆˆ°Æ ÒÜÜŒž"""ååå` TŒZ€a``HII‘’’ê>æõë×SSS™™™ÝÝÝwåÂÈÈ‚訵µõªU«Nœ8AFF6šleii)))ùûj ]jéI“&9;;oÙ²¥ËûöíÛ¼y3JmíÚµ( åRÔ‡NOO§CÀ®]»îÝ»ÍÎÎÖ@õÀ(GNN.00ð¯ªCœX^^~ðàA++«„„„.]>,'‚Çã÷ìÙséÒ%T‰·°°V:wîÜÛ·ocbbzÿÚµk222H(’“w]]abbúüùsppð„ |}}‘PË…ÐÆÆæáǯ_¿îr= ``Aw¥½½}JJÊÑ£GÁ¨>hii…„„ü©WêwöîÝ‹ªŒïß¿Gº¥ËuÛ±Xì"vvv—/_FÚÕ5Ñþ‘h"Ó§Ogfföü>>>333$€ïÝ»×M4oß¾]¸paÞ¼y·oßž={ö˜*{ºººžžžÐ¿74zïÌ™3{öìk ú` ahhØÛ 7oþþþÍÍÍšššHõ¡Jd—ÚAAAqŒ _¼xÑÁÁARRòüùó#HòíÞ½»W’ÄñãÇÅÄļ¼¼Œ»¹‰Èµk×XYY½½½544Fw‘ÃãñȤW®\A/-- îÁÁ¦¡¡aáÂ…éééÏŸ?½€ê€±’aãÇÇáp½:‘ŒŒ,22²©© U(?}úôòåËî$° ‚yyy , Y½zõþýû™˜˜†­qΞ={éÒ¥ìì쾌Ì;}útzzú¿F^KÄÇLJ™™ÙÑÑÑÊÊjô¶›7oÚÚÚ"Œ”? /ÂÃÃ/^§ÀÍÍ}ãÆ …¿:JmÇÐа¼¼¼¸¸EGGçéé9¬lÒ7.^¼ˆä½¾¾þ… ºYê(JKK‘ÓÓÓ‘ØËÏσ úÀ ŵtéÒ“'Oö'‘­Dòòò&OžLCCƒt]OÖ"G‘Ÿ={F çææš™™=xð ¤g̘ñ¯ ‚Äž¨¨èáÇû/QŒ?}ú¤¦¦Ùó³ØÙÙcccQàîÝ»VVVæææNNN#«¯ ;vx{{oß¾ ûÑáÔg˜SQQìréÒ%???0ª€Ÿ MBII¬££ÓϤxyyPàùóçòòòW¯^íáN>>¾+DH‘`8wî\LLŒžžžÍy¹tuu=pà@JJÊ@ =EâÉH--­ÐÐÐÞž»”?xð ’åÖÖÖ(oÃVþ566¢KväÈiiéS§N]'÷×`“ššºzõê²²² .xxx€AP}t͆ ¨è¿êkgÖ¬Y………(àïïoaaÝ7„……{ž‚‘öÏž=C2((H\\|ÕªUË–-cddX#”””Lž<ÙÎÎnÀÅ!©†4¤””T߯‹b±Ø}D0ÄÑ’( ©©yþüy..®^rªªªNŸ>}öìYQQQ"pC 6555‡F{öìÙÈþ=_S@õÀØÅÖÖöСC999üüü›²¾¾>JÐß9sæ¼yóf÷îÝ[¶l¡  èU:³‰´,++svv~ðàAbb¢‚‚‚‰‰É¢E‹ú<®¹¹Y[[›––öË—/ƒ´¼úÉ8Ž……%==½?SõÚ⤥¥­]»öÝ»wH¦¢ÄÿäFu0@ üÌ™3ÑÑÑóæÍCWs?¸‰›ÊÊJdviii$ù³ úè)HêìØ±cëÖ­^^^ƒôHOúúú’Â(`ooÿýû÷mÛ¶! 
Ó[ˆ@òi ‘ö=555wïÞEù ¡££ÓÒÒ200@:i¹nÒill”““?~<’£TTTƒjd”¥ììlIII''§Õ«W÷35 ‰çÏŸ“ÂAAAûöíËÊÊ233Û¾}ûÀö‚655yzzÞºu+<<ýKKËN <ÂÂÂNŸ>®/2øþ6À,¨>ú’a|||¨f?ëÅÍ!B ¿zõÊÑÑ1!!ÁÈÈhçÎâââ}K“†††4 ®ãÎŠŠ $Wüýý#""ªªª´µµgÍš¥¨¨˜ŸŸ¯§§',,œ˜˜HN>D• zzú¼¼ª†Ž“'OΘ1ÃÑÑq8LBÊÄ„HûžŠŠŠ .xzz¾}ûVRRÒÈÈIAQQÑ?¥€Çã‘ rppX°`³³³©©i÷ßHAA¡N¤Ë£µµµ!!!HÆÅÅ¥¤¤äää°°° lLž}ª««C¢ðWõ˜‘‘t’vïÞ½ËÍÍE" 6 éÓ§KéùOj_‚Dss32&ÒÁÁÁ(q”s$ZæÌ™ƒpwtjSˆE÷ 2 ÐÓÓ«ªªÎ˜1•@’Àûï¿ÿÀJª†)ïß¿Gʆ¡ðë‹%Õ°‹‹‹wíÚõøñc“ýû÷ûùù¹ººnß¾]DDdÚ´iH· ¿}ðÚsØØØô‰t'''ÉÂäääÔÔT$Þ¬ª®®æááAê ‰ÃY³f!Õ§®®Žr;wîÜ7oÞ ]‡äkMM :*//¯¢¢‚~Å$"6lŒ_AFF6H§ý(Ï>>>H ¢,ÑÒÒ¢œèéé Õ:ºo‡ÊÊJô«ƒ‚‚ÐåHKKãääœ2e º $·@ˆÝ»wÃC@õÀÈUñQÕ¶¢¢bÏž=Ã6“Hé8pàÞ½{H~9rä:Ò!T5owÓ1>’‚HV¡ê;úi :::H·àp¸!Ë3?###¤%""""##ãââµCCC‘,**BfGÚ•ŠŠêÿû_HHˆ””;;{^^ÒUUU(@ÒäYYYjjê!Ë9éK;)œææfô+ž?ŽòŸÀÄÄ„òO²*ß*ðMMMQQQHÝ¡+‚~H}}½œœœªª*ú-šššFDà± ú`´3cÆŒòòò'N Ÿ\•••!¥çî«‹”Þ%"=<ɧµD:íOMMõõõ zûö-ÒQÊÊÊHF¢Z>G²Z[[‹$Dxxø»wï®ûúõ+B222JJJjjjHu¿ðiyÃÒÒR””——R€„ R€III(ñôôô¬¬,¤ZxyyI=‡(þäÉ“‘T¼¥)ÈÈȺœ ‰ÇãÑoGj0,,,>>žžžY]²9sæ ìýÃò“íïï4jlllAA¨¨(ÊØÔ©SQQ祪Æ"/_¾œ;w®µµõà9™ì HÞœ "++{úôésD0}I";wî츳±±ÉŸW¯^}ùòÉ'$ .\ˆbvŒVTT¤’^444H×!E‡ôƒ––Vÿx°²²¢ôQ ¤Z /_¾Lrú‚´œ‘îS¨©©‰ŽŽNHH ‰ÃÌÌL¤?QRÂÂÂH"M(//~ÝÀ.RÅbÕˆtÚßÜÜâç燬Š2ƒÔ)²’ÁôéÓvð-ú±H6“¦>"%¬  €¾hæÌ™ŠŠŠ¦†ªÆ:OŸ>ÕÓÓÛ»wïÁƒ‡ø«‘*@µó´´´Ã‡ï"2”ߎDHû$=RÏUPPÐÖ­[ß¾}KZùiQ$–V­ZebbÒiuøÁ‰–ââb¤š/^Œ$ܳgÏzè2 Ñn“’¨««#õIÆÇǧ¤¤dee!¥$""‚$.‡#Í^ѤÈn]zÄÉËË{ðà’Ù111<<TÛ¾zõ*E¨N?à‰_¿~}ûöíÇŒŒTçææöðáÃ> )²hÑ¢¥K—ZðïBïÒG$]:äêêŠÇã7mÚ„ÔÚ@ùÞ$-µ·jÕªŒŒŒðððþ/òÞ7 ˆü~¨¸¸ØÇLJ´ìAYYÙäÉ“µµµçΫ¨¨Ø)&2΃.^¼;uêÔuëÖ+++wòÖs˜)ÜÐÐpþüyT2óòòÐÕܹsç¿u¨>Ù,Y²U¯Ïž=‹DË@¥¹uëVoo€òòòÏpLLŒ‹‹‹ŸŸŸ°°°©©éš5kìˆ ½éèééí‰>¾}ûÖÉÉ)44tÙ²e¬¬¬ýLÿæÍ›………7nÜèRzýCØÙÙ×é´?11Ñ›HjjjKK‹   ¹¹ùâÅ‹{ž8%%eGÝ^RR²cÇd dØ3fÀm  úèHòiii ˆêCêëîÝ»gΜ¨ìeff=zôáÇrrr7nDúÁÝÝ}šQIIééÓ§¤0Ò<ëÖ­Cv°%Òç%899‘æ™?>úÉù!¥÷¿ÿý¯´´É<ôÃ;v{FGGß¿ÿÅ‹HÄêêê.]ºÔÐаç6acc;NCtÁŠŠ óóóŸ:uJEEî_ÕÀß‘””DŠåäÉ“¼nôŠ'Ož¬Y³&44ôرcýÏ’ŸŸŸ££ãçÏŸ­¬¬¶lÙr•ÈÈ2)2)Œdð¶mÛp8Ü•+Wú6RññãÇH™KHH 1ùWï—CI||<º:033ûÓŠçÊD:îIOO¿uë’‚ÕÕÕ ,°°°––îÉ7RPPl%‚!δ¶¶Félذ•–¡\ÚP}0òرcÇÆû¦újkkeeeW¬XÑÏñœaaa¶¶¶_¾|AbݺuÃmLcŸYJâââtuuYYY=<:£££ÓÒÒ‚jð¨ÞÜ«QmÛÒÒ2--­oUíšš;;»[·n!͉Ä^ttô(6²¼¼|FF „‡‡+++Ϙ1霞/€d0Š/ ››;ôþ]س··¿xñâîÝ»wìØôù&Žtl»6&‰ÀëׯŸ?>//oÆ èë»Oa!ðññY»v­±±1:ýßÊc@õÀ°cÑ¢EHTôJõ-Y²iÅâââÞ~WAAÁÊ•+?}út¶1ej ¢¢"Ø»wï¥K—rž6mZONœ={¶££#Rïß¿²Ü^»vmûöíÛ¶m;tèP{ïÜ ‚ÔÚ"¤ééé›6mòóó›;w.ÊC÷£d KJJPÀÁÁÁÙÙùöíÛ£¦Ó@õÐ_V¯^­­­ÝóøRRRVVV=?¥¼¼iËììlWW×ÀÀ@°ùA"‘‘‘–––ûöíûë)ëÖ­KNN^¶lÙ;w5oH™Ïš5‹……‰Òµk×þC+‰‹‹·ÿØœœSSÓçÏŸ# 9r„žžþOg‘ÖØHKKVUUEòoXM‰Tü›º5ULLÌ_§T555 ž;wnÞ¼y=L|Ïž=—.]:yòdPP˜ºjjjEEE?~D²äøñãqâÄêÕ«ICCÓØØØÔÔ„Jcmmm`` 9Õc¿.…§  €‰±±q÷ÑBCCŒŒŽ;Fª—=„ŸŸ????::š‰‰ÉÛÛûOóý888¶mÛ¶lÙ2gõÞóçÏKKKGœÑ‘\w¶_‘+Vìڵ믪ï¿ÿþ»ÿ>’4`F Ñ“gtttff&++ëÕ«WI+‘!=ŒÌ›““mªïÿìÝç~Ï9œ³Z,))©²²²œœ111v—;vøùùU À`0”••ÝÜÜÆrýz"""$«())=yòDVV–t111!Á/55U^^¾¡±\\\¢XP@NdK~÷î ÌK–,‰‰‰1b„§§')/ ~4 %@êh™Ú¶mKr;õ8qBFFFCCƒÝ«°°PAAáäÉ“FFF(Ôo\A$‡kii¥§§sqq‘.ýúõ;uꔓ“S½ÃïÚµëÚµk<@éêµzõêyóæµoßžý$O’¢mmm ÂÂÂP¤>€–©]»v)))½{÷¦XÏ÷·¶¶fwÏÏÏ———¿qãFU„ßE__Ñ¢Eƒ&­Ã‡?wî\½©tß³gÏÓ§OQ´/‘‘ÉÎÎ:t¨··÷‘#Gœÿûï?KKKOOO© ’””LOO' 999ÑÑÑì·]ݺu;xð "_3aooáÂ…]»vÍŸ?ߨØxéÒ¥u‡ÉÊÊš5kVRRÊÕW¯^]¾|yß¾}ïܹsàÀeeeR[T© ¥‘’’ÊÈÈ žžžÃ† cßݤ¯¯okkË~è4íÚµ›:uª‚‚‚€€À£GºwïÎ9@¿~ý¼½½%$$P«FZ³fM§NŸ?Ô³gO’œñHO¤>€–†„vêóóó³°° K—.[¸p!ŠÓ¬ÐétggçÙ³g_¾|YMM-<<œ3õ‘µÖ­[·Ñ£G£PßÄÜÜ\PPd¿äädÒleeuøða”©ï/”»g€íÑ\"¢¤9/:«_dâQ:ÊÐBHII=~ü¸¬¬,::zÊ”)ä_¬¬,T¦Z¾|¹œœÜíÛ·{÷î}ÿþýªîééé»wïÎÌÌD‰š`üøñ¤t={öŒOHHPUUEYZbêûb(Ü?”£ƒóõw›µÅêacd>LN¡¨v›ƒAQH}-…ˆˆH^^^```§Nxyy'Ožìíí§Ø7[‹/&ÙÏÒÒ’ýd6+++~~~Ô§i¬­­ÓÒÒÆŒCÊ»páBÔ %¦>^J¨f~^¬ø+ˆ‹‹øð!((HWWwëÖ­ŠŠŠÃ‡GYš-›+V §¤Tü96..îÞ½{œoY„&X¿~}ß¾}IrŽˆˆHJJRVVFMZ\êk”‚g1wîFÜ‹Š}™““]À¤ÓéÒJ]4µõôhË4|î« ëiÄ»÷b¢^$åf”&&­ÒQK¯¿¾N7éZgÄܸ÷’ÁËÊœ ¥jh¬"Zc€’÷q×o>§XC0Œ¶ªôU„ë›kfLdð­Ð˜Ä¤×…4Šb Hwèah<¨¿nç/,i|øå˾ ¯sˇWRTë9pÔXmŠ*Âv Ðr‰ˆˆ|úôéÁƒ³fͲ³³KLLDMš¹Ù³gŸ:u*''‡ ðòò.]º´¡w÷Á7¹uë–¤¤äÔ©S׬YsäÈàïJ}Ëd×}u°³ÏÄšÀ‘©ÒWj´_ûÕñ´Ž&†™«°šsO-ìÀñŽP×w« 
j\hšÿì‚ÉXתVý¥á¡kõ8úç4ïaáVïœ6¯©hpȳÑåÈŠï#&ˆô=‡àoM}………Ïž=KHHèׯŸ´´4jÒÌYYYikk·k×îÉ“';w¾qã (Ë÷#šä½'N¢WêË»¿†3òiÌ9ä·kv噽‚»ÇéNwg·¤ž(˜¶/ÿÚ*šs:íç…†nÔï L¦Ÿ^5ÇtÓUl¦-;õ•••>}úøñã(H󧬬ܾ}{ƒñüùsÿ!C†à޾åŸþÙ»w/IÔ'OžœŠQãvö˜C^Gwž1TSF„„;ºŽÙn¦ÙîzÇkSãNÁ/ï *í&i)ÉÐy(ºt·h¦Û^ÐoK3ÎÈgöïKï™ò•m¢ÓwÜî¦0DÓþ:»=Ài¼×ȧæ*ŸOÛÿ¯Fä}ˆqqvÅ{jd'm 2~¨¶+¶T€–ááÇãÆKNN®ÛëÇ$?´jÕŠ‹‹KJJjÏž=DÅšƒÔÔT[[Û„„ó8»Oœ8‘ÝÀ~ú -vvv²²²(Ú7B>>22’ùãÇ™™™3gΜ5kV) {°:Á°Zfêѳܮ¿œãªË—Éú.µR4q]¹xÑŒ~—wŠN[ºÆ6`yU{ØA{ƒƒöµÆ>uÃ’µ ô;üˆ%ý”ôøgû±Y Çf}aø¤ô7ù”JYÒ‹c­_<¡Ö«IEzÏvÖp݃m %PQQIKKûÂá/»áýû÷$Q())=xðEûíöîÝëëëûÕÁŠ‹‹===ÿý÷_mmmÔ­1HîÔ©ÓW£Óé @äh±©¢$íC™öTILÀ¿ûwó «gWVÏ$Ššr,ßg½"..c2—•¼‹:ê¾ßûØÁФzÆ ðYB>¥ìù8΢[£~ޤßOh pëZ]L,-üS_av6•ó–¢Äù[ý—•‹  … ‡­;wîüßÿþ÷åÁxxxùš‰7†‡‡ß¾}û«CÊËË“•‹È×x;v úòim²²2777”  §¾Š_Ã-=ȧFÇ‚gÁÇìZU_Wyb…ï:SÓÕ_ŠG\kîÊäSc´¬§>ëX쬺_.Ér™Ï´‹³é”¨Jß¾Tت!ïßK¦j<Í%ýÌ -"oÍ·Oè¯Yᱬ÷×¾WoÍgzºì¼æ¨[ãt_Þýã›_`Ch9¬­­=<<>|ø…ÈÇˡZ5$õIHH¼{÷î Ãèéé©««“õ‹r}###²GXYY54@ii©……EëÖ­Q+€šúŠ" úr¼½]ëhT ¹¦xe+½³¡AocŠãMžíÅEË¿QŒ[MûȪ®sŽœ&R5š„Ê ¡½¨êÔGitSa‡¯¶R‚œópÒ¦ÝXíí¬!Ræåj0×½áeµñ¾|˜ã¹,a˵5_ºzp¶«5#|«¬>çc•¯d>7– [®ÿ×ö,Ç• ''òÆ/ދܠV¾@þ§™,¹ˆÍ …‰ŠŠ())i(õݹsGXX…jVÒÓÓ«n3«¥sçÎ\\\ûöíC¡šÀÒÒò¿ÿþÛºukÝ^t:½¬¬lÇŽ¨@ËM}ü}B ˜ ôò®ü-9CKbFƒC+M ÈÊvvÑ­§jέ8/sÈLôYC£u˜rìnåÛtì®xEóš{sô¾ºRSreÕ,,íúxº«BmGF•&,ê­Zu^Ì¡9’‡æÔ3dŸ™w±Ó O§™ùOŠ»Î«îûxc7Á EˆÂ+ìþp$œ9sf̘1õüêãç?tèP÷îÝQ¥æ†——766¶[·nõ&“‚‚‚[·n¡JM¶eË–„„„+W®Ôê^RRB2!//Ž~Z@êãïhá¾mPåã:‹ Û ëRy]%¿–“éEQYñ¡—®݉OLMþô‘5$SHJI¡›–î¡ÆšŠµ‚¢1LJ9LJ*L»ætó~܉)oò+b“ ”‚f7>CM†(ŠÖªÏt/æt¯’˜K;¶¾ý꽈ˆ%(¥ÕœµÕT Yª4¥Ÿœê;û0E­;IÖü5ÞeS4sEå=‹ô½pýöýøä|)eÁ7Éo¥:¨õê?bÌØ‘å©õ«BÕŠÉ´*¿ñ ×…€°7ŸÊ¿P;µac§L™0\Q”Š¿ä~9¹„¿²8í°ÙüùF­¥¥U«û?ÿüƒÖ7[jjj$œÔ}';F‹‰‰a?ÉšÌÏϯK—.OŸ>­ê"$$DRßÎ;Q€‘ú(Ù16_BBÍ`.ù|ë„dõG[Ï7ŽÆ£1ÚÖk´m}ë0ÝÁå«ã‹tÖN>öß2Kq­¹‹É§¾ß1£mÕ°©´8<àááá¼b°OŸ>›7oFeš3GGÇ3gÎÜ»w¯úXC@ 00P\\Åù~‰‰‰bbb¹¹²ûüùóôéÓ¹¸¸P€–‘úþF·nÝêׯÅzÕN¿sçjÒüݽ{WHH(?¿â¢Õ«W³W"ü999üüüÅÅÅì}@Múþ`zzzYYYä·*E@ó÷ñãÇV­Z‘†®]»:::¢ ?Ù¸¹¹?þõÅ)6(ÖJCGº}ù{fÃ61Xr½²s”IÿM¹ñ‹8.$Ì<µÒaòªã ÍMi˜çÞ-;ÔXΰUÝ VÆWµºFÚ—º‹ê;ÕÛðäãÓnY÷véê,¨SÝ#"mt9Þ©Z¢%Ð?ºªUrSþÛ™z;œK­5¦¾íéÀÝéTæó^¶Þiµúv½÷îEë:Ù¶ hïâA6»ú¦fk<–£svªµ<íVå¿YrmÑȱ›¯ÖÝÙç¿MS:Öîúúº¹Þƒ›\yáÎ+„í©¯Ž»vG׌|é6È4<¼þâkÌÅõöÉ\©ÑnUl˜§©™]=‡ä@·AJn¥uáÕƒ1²•]ùZs޲º@W ™¬NŸÜà¢%Ùö‘¸²%Öß±GEU#e.¤-ªwÌ0÷I‚î N7åÒÿ$i7#óOëTf¸¬€…’ÆÛkeNcc•7þþU_õزáÇ–QÛ"òt…ë_ž·+i >PkóÔN›w¹åF,¨LÔ%Þæ¼æÞu†SV¦Êo¾¼b¢x[?R_]¹Áþ×k„ºQ#dš2ßôEší7sF¾Ñ{s/Zs>Lä®Û]ûK•mQcÛ÷¿òê–1+øi 4¦¨HÎÉ ßrÏß±7«±`± m@͹ޛÑš^1Ù!ºöÕ_!Ài¼Ý>5vÚt£O…†qŽ9±*¼<Ü%ØsA­¯áUh£Éº?®(ÂD ¯uŸ3îÞ :Vªì^Ö5#ßüëïvjËnλ½L´_õ»ÎÝ9=εâ(QTõÕ§L¾/<ït[T\Uä#$¸¸_W;â²Âù‰Ý5´c¶ß›#ò•ï[§ÏöÅP¯¢'Ç\ƒ1 ü˜cÁ{§!ý4⨓ý?ï~‚\µùáÁ“¸¼xSæõÓ àÏM}BmË8ÛõèŠ~‹»„.Rqå‘»¥) ê+o¬"Èàj/N1|t´:Ãú(c~êÂN}¹ò_ÀoC6þ¯ÿÁS-jè±—ÿNãa­¾ÝƒÅ6ıÖÔº µfF¬D‘ýìü*óñgƒ=¦ëzÌñÊX3HšÄï–B }Y³ì¿âê.gu’éKsƒ=þ1[wü“Î!'ÃS ·ô…)ŠOšä8ªÞ“Hu{¥‘é=«bSq Z;ÏH˜õ£O,áà~l™þ±eÔÖð‚©J¿¸¼âÜû»ÖîÚveÿÇâMÚÞvŒ§ï`7)*w§’UçñWŸQ§óòá©{ ë\­UòúÞÊî•ÇG ê†ÊB#bðà úê'3l[Üž¤n6ÕyìÀT…SMŽF1׬¹›&î˜7ÄÁ«:~$xl“diÒ“®7Á’ZîYÕ#c‡Ñœ¾É‡&pŽÈÆ¥Æ-m†¶SzÿÎ}œRÀÙ®1izuä£[Õ‰|%?`¦µO“˜5×t çKØ šáòN€Æ¬ÿ}¦ew¬9ÁnÔ™3œýûïéI[v䓟¹~(Çáfâ")UoŠ:gg1i(+ |ZnÐ9v䳋~½CŠÉÑ+ý¤Ù¬}b] ü¯<³2î\òÒS¡«‡Öá¨ä9†ü2¾sÿÊß´{ÿSné·Ô{—TV>º¹¢êËe?XÆŽ|: Â.,êWÛ®>Ç4Ù®7vëꪙ[ä'Ö£ÔZ·c1J€WH¸ò$RÛÎãvE2wqÌ$Ío;òÉϺ¹®ò4/·¨‘ͱÇ}¦ÞLg´yŸG ‹|ËZÎÜ:‹ù O%ÝÒ¯JvLAÍÉ»3FŒßyhE9ÎÞirk±ðï+ðß¼´*æO¶7™&noÊ[ƒ§ªV…&ŸœÖÏÁ‡¢ŽÔ3ÁÏî'|AÆÙ¹‘ÏdMü9]9ŽÔü†©|„u€ÔW—Úÿ.2ŸÐí<•ãažWfhIÌøÚˆë¯½[2˜}Æ©‹3¢©V¿”Ãiåw¢iO|TkTÙ·þ‘W¦ˆj©Ö¸õ1f»m;¥i¨Rߟ—ž,ÖÕ2¡êˆŽú²MŸ«Úÿ<׸äRý¼œA¢4òCvüøž)çÎEcÃh,ÿ±Ê´/ô¶9ôléðŠÓw*¦Ç3LS¥™qÑ|ÏÌx“™ýþ}yâ¼ç•Ã7å¾!¦`[9’5(ÊMSÚ’3œ3aâƒþÝÕ;‹ËN>v­úaÕýŽT4µÓ§’üVßÔÀ+īՇŠ(¿1dÿ™{úÖÚ-rmé È™/^Þ½Çn¸»S_fgƒcÝ yLéö£˜¢3$½›¨¼=’:æ¤sŒã…Pê¬,¬&tf·f½¨xÀÁ´q†µ&ÕVÓx‚f“½èIl\ůlÓ/lrϽ_¬ÿ›b ÏU‘ï{¶7ÝEG8#¡4uùÜ>É6}ñÑ{»ÿ?Eœðc7M´žÚµæ‘ÚˆõK³O õÕÆÓiJs Uš~jí¢/¼mMcÚÆ};êHÔš ~L÷WαêlÕãa¢ük=X²ŸÕ /÷Zïë£Å5v䢚§ÇÔÎÖì⽌75 pÞçWô±Á^¬ÙfÔøùÌøRßW•K¥aw-EeYãuœ}+#ŸáÑÇæÝž›ÓºW=q39Üks8E ±Ð—mõååùâ"I.‰aN XÛÁx9Gÿ¤sçØyZù@äåNjËI|U¤/Â] 
–%Ç"H2?‚yé¢âÒJRµï±zzeî‹C•mZ&Óµ:HHµn#ÜšŸéþ]‹À;rSêÈMŒÄÛ'}Ž<èrhùT÷–Ÿrìê¶iä8™Þ–βî3{ë¶ÊÉ·Ît”¨—/“è|Ë3+JŠ+žPûôNC¸”Ï3ÙÍ¥¹ÉO£îÞ¼~ÖgsÄMùäCQf{_l£PBULöQl:¥õƒJ £°¢iâÕÔÓê­fMÞÞ2ÞÖyRS¬òDRðglžBK—P–¯s…,okAü ¤¾/-kºòù4} Âê+ϼ\ù#‰è­e2×6Ø»a“ÙpßÁ ÷%IiIÃóÕ_Ç\Ñ”¾ŠÃ¿¸À”º“éUï¡Æ—ç«‹¤8|“¹¬ÁÞ¡L{ì_a¨­¥Ó­1öŸB\*"_£öV‰ {·x§Öá«Y¥Ö¡i=óÕäSÕ–ì>ÝlÝ…ÔfªO³üì…*Žœ»Í ¼Æy‰éßJsü jëÒpeùÊÇ3jG©d¿uÿ&ˆ f¢«^þX¸ì§!7#oß{9ÀuÏ0ynŠ[TImùL±ÝDq=0k×;˜¢ŽýÏÍaìÍ3¨uìÉnNœq¤ ÇdÃ7´Ÿ´;½|8ß½f§-(^qÒÄòÓ‰Õ›Äã#µN. õŸ2‚Š(?uf£WìñÙ=kô-{¼{ñÁÖú£È ó4“ò6}{£×îò1!ªâ– Ì| iMØA¾¨ðS:»!+"á=Uó\"#%> ; õÀ79s'¡P]½â„BòµåýfVýá/$ôI¡NŸò^õ†³Ï“ìøßò‘÷öU$ZîYçþó½kÜd”|~j?[Ö=„;¢OÙUß×Ç”U¬xi®|%r+<ÒÕ¡ÛžíqTÜ‘aºïùí™]u¹ã‡gG'õŸÉš®ò©„çúÂÅšàQ°LôßÅØ™¬”¡ò4ª½±í\ãέ™nž>èWq¹ËõÜŽ‘ꊤ¡­‚ÐÕK¯P”·¿'©Ò„¹Ó4•$Ÿ2ãîù­|<‰—syý,ŸMUŸ°Ž¢ŽÉ-Ÿì¤~E¯ž®¼„FÿðE»òkU†ÌìCùD°/1=o8wâP Ú›[çvE¼¨^H~Šâx'x›3¶ÝÊHêæ2 ™e”îøE£úªQ9÷}×í®xPèñÛ‡£È5›u'×Ôí-õßÑãøN¹ã©ü³ÈúñýÙßQÇy3{àÆï "d¸´â2ÎÓÖWÃ÷±ŸS9ÁŠÄ™õ6—¢ð¾>@êøK”0ÞU4}jì „ OÞ91²ïr@ºj(}GŸñ[bÖi‡v1\@U>½pÎɬ5ËýVEXq‘z³ßH~õÐRó< µ²ØR~ š›OŽA•Æù<ï6t9†µ×”®ç*ñËÃv[³Î#1%¯1Íîïš9zÁ£ sº_˜SkHõ±{ŽîýŸ³¥­.iƒ.åwRHªÔêÕ¦§SF†U˜xÚãÜÀ+)TaA»9öîC†ÕW—áˆ_Z2˜TifxàéÐÐ{))÷}ãò™LÁ¶r½V8 5N]ºúÍmû®ÍÈX[ð6Êÿò©Ð¸·ÃèBB&f‹†Žœ>Z_­údœèàsÌ´{§Nõ‹}ž'P€®ØÇ⢛Ùh¹œë§—ÇÉÊõÁ”´<Ï´¤¨7ñçyˆ½~9/‰Nå‹MX°^ä0“R¿ë½¥§æ^׺§æ··„àÓì† Ó· WØ_{>+ÎgXéJW®¸Æî œKX}%v.š6×ՙºœLphGÎykÙ:š¹o-¿r-66“Ý ?ÿ©àï 4$0㛳âäÀŒÉ ô왑1¿V'M‹ õ=2c3g+½óŒÀŒ\ ©ÞóëÌ‹ãH½®.žv†V¶†_Ì…²Ãf¬Ö˜rKöaÛoD£æKo§5Á‚|¾2˜œ¶©£¶ií®í[ý3¸þ5¨6ÎqÓ¸æSÞ6½Ê#î÷oo:3*[núê|¿ƒÔ]Âz—Y®ÿ²ŒŒúï|qqðÆ=@ê¤>@ê¤>@ê¤>¤>€¿õ!7“FûÍË™™)))ù{—Öª™® öÊááivoœHIIîÐA©ÙVìOß/šÃNA´j®ûR@c­X±â·/ÃèÑ£/_¾ÌÏÏŸ››KþýË«Q—€­.Xaa¡¶vGÒ0k֬Ç·° û·üõë×jj2¤aÍš5Ë–-ÃO ¤>€?›¿¿?ù·¨¨HLLŒì £&Í_Õi(Ÿ™3g &?ÊË—/ÙÍ[¶lùí©©¾Ë´iÓJKKÙÍ………222IIIRRR¨Ls¦©©ùéÓ'vsqqñðáÃß¼yÓºukTæû%&&ªªªVµ2ŒÕ«W»ºº¢2H}$&“yæÌÎ.ÊÊÊ="ÿ¢>Í“µµuLL g’LtuuãããQœïD K5g—¢¢¢M›69;;ÿÞ‹Ÿú ‰FŒQRRR«# ~=zôïÙ³'JÔÜøûûïß¿¿VÇÒÒÒׯ_Ϙ1ãèÑ£(Q“ݹsGOO¯nw>>>[[Ûƒ¢DH}ƒÁ¾£¯®üü|ƒË—/÷ïß…j>Þ¿?zôèz{åææ^¿~Âyóæ¡PM4hР†j{êÔ)WWWyyy ©àO2pàÀ/ôýøñã˜1cŽ9BþE­š ’:ªn¬ëõë×ŽŽŽ$¨wéÒµú&¾¾¾£FúÂVVV¨RÀãýû÷ááá_fΜ9999³gÏFÅ~;UUÕ>|yƒÑ·oßÌÌLnn_5Ö‰'¦Núåa²²²¢¢¢nݺ…³ßH}ŒÆ¼òððˆ‰‰‘à—‘‘‡×ÿ^$–$&&~u°’’6`À€°°0­1nß¾Mj+""’——÷å!KKK,XððáC ©àÏK]NN;Ý1™LrPK§ÓI3éÒ»wo%%%…Y³fIKK£\¿Ý®]»ŒŒŒÏ;ÇÇÇÇÅÅEVÙçÏŸ¹¹¹óóóÉŠ+a!CΟ?¿G¨X#ééé‘J’†åË—?}ú4###%%…ì¼¼¼ì3«ì=‚Ô¶  àÑ£G$%ÖûÄ@êhvØGºœ¼¼¼öïßçΉ'’¤GbªÔ|ˆ‹‹Ïe©ê¢««KÞÔ©SIz‰‰!UúkÖ¬!ÿ¦¦¦vïÞýíÛ·~~~»wˆ@eúZŽéÓ§ÛØØdee9sfòäÉsæÌ9tèÊÒ<‘X?eÊŠu¶êôéÓNNN(Ëwzýúµººú£GÚ´iCR_Cô¤>€?F›0a–-[6oÞ|òäI}}}ÜÖ<íØ±cܸqd•‘fSSÓýû÷#õ}§ÄÄD]]݇²ÏšÞ¼y÷² õ´@k×®UUUÝ´i‰Û·o÷õõ•””|ò䉸¸8ŠÓ¬˜WõX333›ŒŒ T¦iȦnaa‘––ÖºukÒêççG¶ù®]»¢2H}-¬¬ìèÑ£œœ¶nÝJZGŽ™žž®¡¡1jÔ¨õë×£>ÍIJeËÌy# ~kÖ¬Ù·oŠÓ#FŒ aïÍ›7U]víÚEB *€ÔÐ2yzzJJJ.\¸ýèNž¸¸¸àà`QQQ???<½ð·ËËËÛ¹sgZZgÇuë֑ľ}ûv”¨ñnÞ¼9nÜ8òoÏž=«:&%%Ý»wïÚµk¨R@ËDb £Fºÿ~UG##£ÜÜÜmÛ¶;öúõëx1Ào4~üø•+WŠˆˆpv$­3gδ³³óðð@‰ãõë×:::ææædîÕËÞÞ~Ñ¢E(R@K6{öì7náææÆÙ}!‹¯¯oÏž=;6mÚ4Ôê[¿~=Y u{íÙ³G\\|Á‚¸íË222deeçÍ›—ššZ·oppp||üåË—Q(¤>€ÎÇÇG[[ÛÝÝÝÖÖ¶V¯‘#G2™Ìû÷ïËÈÈ>|˜——ûHÞ>xð`rrrC?~œ¬¤¤$Ôª^áááãÆ›5kVÝ—UV™:u*©3j€ÔðW¸wïžšš???ç›Á«ôîÝ;##ƒ4,Y²ÄÃÃcéÒ¥õž€‚åÖ­[õžžª2tèЉ'Ž3æâÅ‹¨X•ÂÂB² Ÿ;w.33ó CŽ1ÂÚÚšlÛ(RÀß">>^OOïÅ‹k×®mh˜ ,%%% ,8zô¨••˜‡‡ÕûNœ8áââòêÕ+nî¯5mܸqÒ¤I¶¶¶îîîyÑJKK—,YrèСE‹?~ü«Ã;88ðóó¯X±ÛRÀßåöíÛ .ìÛ·ï;w¾0‰y;Y(ÖÕ¡$¢ oÞ¼yèС¨áwš;w.É{)))þôéÓ&L ñûï|²Kzzºµµ5Ù\I~ÛÂÒ˜±œŸ¤>€¿Z×®]³³³]\\&OžüðáCö«ü¾ŠŸŸ »5 `ûöí÷îÝ2dˆµµucäßlôèÑ #33³iQyÓ¦MwïÞ‹ŒŒìܹsË«ÏíÛ·ÝÜÜ®^½:|øpWW×M,Mˆ‹ªªªdD333lrH}PþÚÂÊÊêúõë< ‰â›FÎRÕJB §§çÍ›7'Nœ8{öìF†É–ÉdŽ?>)))44TXXø{&¥££“““3a„ׯ_‡‡‡ÿÑ'ZIY=<<È£¦¦F6B’ÓΜ9ó=Ótvvöõõ%¥æççdž€ÔÕØw‹­[·nëÖ­îîîM~k_­XTTtòäɳgÏ’Ãú6mÚŒ9ÒØØ˜N§ÿ%…%ÙÌÐÐPNN.((èN–”´¬¬¬cÇŽ={ö|xÉ’%½{÷&Yè{žøRVV–À.xttôÓ§Oóòò”••»u릥¥EÒ™… Ë/þ¦/^$[ ¼¼¼§§gã_}H}ðƒYØÍ$q¹ººž>>"""³fÍúßÿþ·ƒ¥®š‚‚òÕH–#ߎĹ”””ÏŸ?_¸páÎ; ãíÛ·]»v]¼x1 ϽYšófF¾YÔððð3f¬X±Âßß»Rü;vÜÅÂnÍÊÊZ³f··waa¡¹¹¹Ýo _ ..>€å ±„:_Û´i3lØ0’ùùù Hü#ÝW¯^Ý®];EEEòÅUTTH8ìÞ½ûwž?lŒ„„„ØØXv^%I;55•›››,‰©U‹AU‡å Óyÿþ½«««‡‡GŸ>}6mÚÔLÎæ±%'';;;Ïœ9“,d³:‰ 
H}Pd¼å,ìÖ7oÞØØØøøøôèу¤A}}ýæ¹Ø$G9::Þ¼ysìØ±$×­ciüè$(Þ¿ÿáÇñññOŸ>%ù0??_NNŽ$î]»jjj’ˆ¥  ÐÐèL&óÑ£GáááÑÑÑqqqIII £C‡ªªª¤nìÑUY¦L™òßTXXx ;¢Ož<900ÐÉÉÉÅÅåw½ýïøñãdH¡$_AFFæ'…m’r#""H‰233±_ õ@ËÔ·o_-HÃ7H¼1bÄþýûö† ƒ0`ÀV­ZõC¦I¾…———ŸŸß›7ozõê5lذ‰'²_:ÿMÓQPP¨zü&§‚‚‚ .\½z5,,ŒdH555SSÓïy+à ž>}š4,^¼˜dÔ¬¬,qqñ8}b_¿~ÚºukìH}ðW é(==dÒó3²ß³gÏ´´´ÜÜÜ^½zõ=ÓÉËË#éÔÇÇ'--¼9sæ 8pËOªNËÂÙ‘Éd‘ÅhÕªÉdI´µµà|7²ØÚÚÞ¼y322òûCÚ† öíÛwïÞ½ß~e/ õÀo@² ‰1¤aĈŠŠŠ[·nýQï|ôè‘‘‘Ñúõë?~üØ„Ñ †‡‡‡»»{aaáÔ©SÿùçŸÅ,¿·\$dá̾¾¾‡ºuëV=¬­­'NœÈÅÅõ3bquuu2Á… 69*wíÚ•ãÔÔTlêH}ð·óóó+((èÞ½û¤I“Ö®]ûS300““{÷îÝ7•½zõê£Göë×ÏÕÕõ–f^7’G²Tu ß²e &&&K—.USSkÚ”ž?îì쬫«ù­£ß¸qcòäÉÏž=Ãæ €ÔPŽN§“àåå%-- ""Ò„‰<|øpÀ€aaaO;»ví"9“ ïîå.c?vóÇ>Ü·oßÍ›77á}d,Èeee“’’v÷îÝû÷ïÿÖÔ H}ðW0771bDçÎ}||¾õÉ($°yzzfggõÁ‚‚[[Û .8;;/^¼xþüù-²˜­[·ÞÎBš322&Ož|õêUòe]]](YçÏŸoß¾}ZZš€€@cR4‰îñññؘúê'&&–™™©®®niiÙøk,ãââ¾6˜L¦ƒƒÃÑ£G×­[w˜å頻ŒŒLÕ;úH<^µjÕèÑ£÷íÛט3x:::¤h***_½CÏ×××ÍÍ-99›1RÀW<~üX[[»´´ÔÞÞþ«/Y²„ä½ÀÀÀ†ˆŒŒ;vìðáÃ===wìØñ—×v iضmÛš5kH`^¶lÙ—G111±²²6lØŠüîÝ;33³””l½H}rïÞ=eeeYYÙI“&}a°Ã‡ÄÆÆÖÛwË–-7nôññyýú5JZËB–üüü!C†¤¦¦’D§¨¨ØÐÀK—.íß¿ÿ®]»º –dB’¨ñø¤>€o#''§£££  PïIIIu{­[·n÷îÝáááNNN¨ä ^»v4̶víÚ+W®èêêÖ;äùóçååågΜ٦M›Z½ÜÝÝÉtê¾nú¾„¤‹cÇŽ <øÙ³gõ`ll|ôèQ:ÎÙñâÅ‹cÇŽ [ºt)jØxö,vvv¤ª~~~}ûö­5€˜˜Øüùó-,,N:U«—««ëÝ»wQC¤>€o6räHooï%K–lذ¡V¯Õ«WwéÒeôèÑU]>|ø ¦¦æääÄd2Qº¦qcÙºu«©©iLLŒ¸¸8gßuëÖ‰ŠŠ¾|ù’óìëÚµk ;uê„ê õ4…———„„Ä¢E‹8_âWTT´eË–´´´ª.¤õÀIII¼¼¼(Úwrtt\¸p¡AÇŽÿý÷ߪî4ÍÆÆféҥǎ«ê¸oß¾€€ © ‰øùùçÎkooÏ?\\\&L˜P•û÷ﯦ¦ÖÐ… Ð$à………?^RRòñãÇíÚµcwwrr’••---åæ.?ð»xñ¢¸¸x÷îÝQ1¤>€¦[·n„„ÄÞ½{«^~øðáG‘?:vì¸~ýú©S§¢P?ܸqã D*ìåå5lØ0Ò…$mƒΛ7´9rdÊ”)(RÀw¡Óé#GŽÜ²e‹««+i%‘CCCC^^¾¨¨ˆü{ñâź¥M›6™™™$éEGG»¸¸.$`Ÿ8q‚ú‚ƒƒÉê@•ú¾×Ì™3Ø©ÏÇÇÇÔÔ”4¨ªªîÛ·‘ï¸{÷®¶¶677·³³ó„ Øïy ’‘‘©õÄ@êhŠaÆM›6íÅ‹rrr~~~ÆÆÆ$ Ž?Åù5îÝ»§¢¢¢¨¨8iÒ$²‚ƒƒ¯]»†È €ÔðÜ9s¦cÇŽjjjûöí+**Z±bÊò+EGGËÊÊêèèèêê’ÔGr ¹¹9Ê€ÔðÃR_PPЫW¯455W®\™œœŒšüb‚‚‚»ví7n {×®]{úô©¶¶6Ê€Ôðc 8pûöí999¼¼¼ÖÖ֨ɯGòž··÷Ë—/ß¿¯¦¦†š õüÝ»wÏÊÊ*..þøñ£¯¯/ ò»¬\¹ròäÉ$~W½Äú~ YYÙ´´´‘#G¶nÝÕø]ôôôÈŠ ©ORRÕ@êø‘„……322F…Rü^cÆŒyüø±€€J€Ôð#µnݺ¨¨hÒ¤I(Åï5}útWWW¤>¤>€ŒD>>>>„ßNVV–½:P ¤>€œúxxxP‡æ€¬ˆ?¢H}?RII FCš²" ê€ÔðÍÊJ>eç>É|û2''÷ý‡‚……eEEŸKJ>Óé¼bb¢nnk¸¸iüü\\Bt>aAQQ1IÉ’ªÜ<ü(àŒÙÅYYï33_å佟W_PÂ^e¥Ì¶mEùø¸ÉºàáiÅÏßJ@€»uk~aaA11ñvÅÚväâÆ‘!Rüõò?>‹OHÌLO/‘‘áVVæo/Ë#'Ï+)I«|)õ©`g÷¿ú&ó™¢Þ‘ÏçÏwSÓJÓ^1^$3^¾,oËÛEUFMÕ°­„"Jý̲wïÄ?‰NLÌÉÎ.UTäSPà““çi/Ë-#ÛJ¦ü&¾V%Ä9†Ýòú&TJQoȧ¤$$%‰‘žV’”\ôúu©l{þ®ªŠj]…ÄQl¤>hÉJ9]¼‘! 
ÐJSC {wºŽ.MG·í™x«V­äåxÉG¯OU·b&30>¾8&¶ðuCGGY»÷H~6Xå¥)|q÷žÿÝ{ïddx54T»ðrJü‰óðÐ:*ó‘aÿª¬˜Ç`œ½¿0&¶ ¸˜ÒëÛ½G÷a\ܸK©Zféÿ]ðH””ä10ÒêÅ«Õë׽ݛFk¥¦&@>¬¶üœÜc×®ç'>-4 ¡¥aLãâú»VEÙ§ѧ‚ƒÓIAôú rJý²¹óò¶êÝ[|Xm¯_¥y„„~ÊÉ.nlÔ±cì(H}ð煽؇Ǯ^Mí­-hh ´`A»æ°Pb¢Ü£F ¢„™Ìôèè]ÁÁ{õRêo0¥eÇ?fÙÇàc¢óŒŒZ÷ê%л·TsXªör¼Ó¦‰QåéytçÁƒüaÃt{ô‚]©š»Ì7·N —‘æ9²MÏžRÍs!i´VZZ‚äóùó§à›nÑ1cÆ êÔÂÎ81™Ïž¹x9±W/A£‚šéºàá¡ Øš|ŠÉgÎlz›Yf:ÑTBR»R4»Œ‘àsÙ7iÜx‘l%ÿ”¥nÕª;rdeEoÝv£®ªžÞ„?]”„†xp?{挶ÎNRÊRóñ¶š8Q”4<}zåðá¼±cŒ:wé‡ ©š…„'ÇNŸIšg%¾h‘Ôú$$x¶ûô)këÖÕZZ ˜úgæ½Ò}·Br$ ôÛý¡ëBE…Ñb©ŒŒØuë‚Ç3TU5Ä.€Ô¿MFú5ïc‘'Š®X!Ý¾Ž—££Ô»ì¼Í[Vñ‡å¸ÇÞWSæÌ7ìßÖ…Œ ïÒ¥Rÿ=¸iSÈŒ¦RR*ØÝúà×bóqk+Æýçžßkˆx[ng'©¨¨÷=·­,ìyxšù3ŠÒ÷øW¿Ÿ“cK[:ò“ ÌÏÏïãÇÀ)S`·@ê€_$ãÕµc'"--ÄEDZ쀖]]ŸÇ¶þ:Ý{ n¶Ë{$,ü•õ< ZK]#F´ÎÉ.Ý´iõŒ“¤¤º`@ꀟëþýƒ±3%i´V-û›òò¶šÿä… Ò^¥š˜ÌivËÇdúúí`2Ëþ Çç4™X[nggÉýçu{khh Çn€Ô?‹¯ïöV\esÅÿž¯FqñÙ³{&L°ùíëâä©m:óii þ›â¨ÑBgÎd†…Ñןˆ©~Œ¤ÿ.ÇÅgÛüï+÷ñò‰QT&Õoj~˜a­“PaÁú§l œ¶›ý/Ñ»OuŸÒ¬Œ¼Ì\i•—‘à­3Õ¢Œï?•”7 IˆÈˆðUv´ÞcURÆC¯èRPP\ïR‘x8Û >¼Hÿ”OfÈKïÐéNêëó9ó1âι>}ÇÿÆuîÁÃCéõú›7ȉ[ïÞõLN>ZQA»'R|¯²’gÏÅZÍkô½|KH¤ª•¦dô6˜2=F%?ÎÊ£( ¾b‚5O³zJ·RoO½ÞéÇj5Š+1Uã¦^\òé0&¤¼ƒ’êôÁ’i/Îù¿,oU‘þb¤ •¿ÃÈ¡ü\ŸëâU âCô»Qš•oYâÏ MLf5jº.‰Z¥HQù{†;ز¦gÐ{dáÿNßðO)oîµÚkz£Þi>nœÐ–-ñ]»öù=ïÄË˹þÖÉY›å”i¢‡û99ô qq¡H}ð]nÞòÒРÃKþ·ï€DQi1ûV”úðÉfOv¢£7$‘z7œùÖGîY¢S1e·ýTТƒ6w •fø6ž•ñ†O|znPçÊ4——‘[B§×½Ì”®fŬz©zéiËE¦¡åMîÑ;m4ø©ò«Ow—G>Îó'’ÅÐÙímîªÑy—½ßW¿mèP ÿ€3S¦Ìÿ-ëÂïÊ5c“6­ZµÂf)Þ–[]MôfÈI#£i¨R4])#'òn–³ó7½þ›Qü¡ì#UJšx©ìÃW£Ë;=ešv®¢àUö+Vƒ‹®KÝ „§¦—R:.öWøO›,<£B?SÕGIÀn¯ñÆ ß]˜uÃOr°/i0p¶ÙTõn·üÁ¬ózá>‚4ŸºcÝ~ö©1©ÐÐà ÉÎ|ûL²]ç_¼.2ÒÃróJÔÕE±Y² äÙº5ÉР„‹›Õø R_iÊŽ…it¯ È'Ü®SמZÚ½4:H4‡ªåÅY´-B mù‚¾£F¯ÙjÜ¡‘•ÌÝ` æVÕjx#÷ÖÀ/Ý`O•¼<ï´ì*ÕVà[²°0[eäZ‡ ¿¨&÷׈j»Vµê/ ]«‡ àw‰ŽñíÚ•ÎËû-'—z¨ÙÙTýR²±ºCßÁ!}¾o‚öa}×ïÐ]úd¤å‚‹ÝF‹òtÒóYª»ô…©ŒÅÑEs7Ïë$J凟4u¹GF0v_qÅê³9Ïšò§1ÀkK?}’Ùòs""Ò’+~íqQ¬s‰U‚Vl´š}—ÞôP‹¶/¯³/eá%}»âoZL­ˆ£MY¹ÈÏ›—`ûAîÛ„(>ÎxÈG}ºe»øxáÆ®<`,^Ìñ4ƒÑAW›8À߈É|õªXQ±±/èSf¸žÖ•¿½RÝW è¸ØçŽŽÞw9zÔC "ŽË¦ õìFÄ^¯gÇ'íé%*åucÕäRìËõ¼˜û3^ž;õ(ühÐ¥‚ògn¶í¨t*tü$}vòáêk;y½1£ë°òV) ½õÛµù)&E?xÁ1çª5OJšGD%„éAå½ñÞriãùër‚T¯’™UÜÕjßö8‘òŒ¼ÜW"¢íÙªx—ËÇÛªµÐ÷>¹¤¤ ¸¤ÎsMkv,-((£xøè¤¥$¿üy§9e‚RâŠ2µþÂ\š—U˜—OV —¨ ]B¢ž“YY)™åO_Q”©1@IaÖº¡ËV…QÓ½´g””òð4ñHR©CÙåËï±§ü¥©Ïu£A“föKÜÈ¿©ÿ*+ÌN®ê˜±ãJô Íêßg%¯Cœ&ÏÜú¢Þ)w<í³eb=áìõus½!Þ)u{hŒóɰVq¨Î›Z7r°¯Æäâ|Wûí¸7M“+/Üy¿ñ‰Í<Ò}lúÔîZÔÉñqÎÓ¤®v¶s ê9ßXðòº‹ù¡ L]ÎüÊõýÆ*uþ\šrÐÑÚbçÕ«ŸÕ¯ÆûWä˜CS5çžàì2ßïåNyì?Ç)|üTã/ï”ÐÔ\ÒðƒôEÔ4—Ô¹¸²ó >nƒú44 ]Faº½BoãæÖ1 SÙ¢6F¿QnŠHM_búý¯÷ã—ýKS_vvªXÛï>Üz7Hd7ùe¾=’ãé5•×GîZ¢Ã³g¿¦íãªT5ݪ]¼GëÎLj{ôN{ þg§¨˜F°zªZŽT(z»ÓƒÝ_;º`®†³à¿Ûê½XÇZRÆÓ»´§²=½ÙìYð?æ³KUưºHeŸ<«{ íÞ«SH÷&|!~Z+.*ÿS¶ P[ì°]êûÞZ°Tn-X™ŸŠ¢&ô:W³÷x‹Eí‹oïô g·l$º•¾åž¿c煮xÚ\ÉÔûUí)+SIIåÒM¥ü_GŠzÎî!Tõ·2ºlŠªLRŸÕ#—x›óš{S LóЉâ•_X²ô•íWÅÖè¤d`Ü…zíSÑžæeÒÅ‹ì–{mAU$ŽÙ;FÓæRÍÑ4 º}zó*:º2w‡{˜(yPÒ‹Ó36È|q! 
žQW›•Ì™À·†û/Ä-?QQaY_Øàç*Èÿø+çXXXÀÇGûΉÄ»U~ÔÑcŒÇ£kâÏD°;Î(ï˜sé+¡õ›˜6¨ârÒý£Vö\²ê!õþSÙ(B}SË;êI ®È:tÛOd} DètfÆ¥ã²cÈôºÞȳ(\ä<¼¨‹– LjuÞ˜²ªCçÑÃŒ °8Oi:›]ÙÔ‰5³É_Ї«°ð=RÀߘúîß¿ÿLJ.?¿2F1y)Ú§×q·»-<]cȳ7NQaŸa{l^3ò¹†¼_eІÝìv”zqlN‡é‡+²Ÿ“ö¢.¯7( Ù‹3ójF>å“ÉÏM;T§•=Æ‚¶U‘¯¦6†!Ìz~Ì‘iÖŒ|Z^=#Ë1͉‚¶gQ1cöÚÔŒ|K2™ë+ï*È\©Ñ®ºïu»¡f«*NæÞñãŒ|ö¹ÌíœçH îR﹫ýðž íÛKK·IH)”©÷Œ?ÉÈÉv”w¾àè8zoîEkìv?õE¨ùíݪí—Î’Æõ™ù“È9ÃJtãmµ8î|ÆýAyG‹2å/Q¼GÒ]yfó,Q™Äª:êw"_}®·sÝÓƒl/và¹Xy8¥íubòôò7päŸÚÊþkö“A"–u—àÅ*ÚûçÞçËÿŸ=¹ÃXåËIÃ6 ð7¦¾ÇáŽ_JË3*È¢òÚμۗjZK9¿Îî|åýmL>*/гïæEGG,’ rOì>ÌÙ}ÜN_Ó?Áè6çoèú ‹_{šfû½9"kšÞ·NŸý¦i6Æÿ.2ÿÇþ©Ÿû,åiÚË7^^ ;QÊ(P±/ë¯Æü;Di;ØMç÷4xPÿaQŒù"_{ÒrØrmÚrΣKNkàE!¿„  TAasŽ}E/>1xéœ÷Œ•ä}È*(ãáá‘ ÿ¼Çù” ¶ùµë¢MaÁw­‹ªð¶`b»ªsk/N_`w´6#‡ÌkG"I‹’¥¡puļu¤üzN%3½ÊŽümÌ™6ÏVÍŠ ¦éc®{ïúÕ^3KY _½gIÿ­c*O9Nú¿Ñ ÊŰ·ü©¯üg“’Rrrù~”•“Ê/Šä$cŸž¾½Öõ„OÃjg(4ƒ'/r)J‚â“P”£ÂÒª:wë)Y{HþÞÓÇS¡ç¿ìµ§©¤úýÓüBL„ûû#RésÏ2ûœqì6i[­¡Þ䳚£‹óùg›ÆvjÜdÏhö\ç*ƒ}à$ÖÒ$|¦Ó¿ý:ÏÒWxÖ”ÿ¦6› S_+õoc^~Χgt‰uŽÿóî^—Õõ#‰%´ÀN_€YÂÛ­â]|JúCÎ^ !ÀüI•ÉÉ-jÛö—>ÝZ¢m§Üœ{ß3£„}³qÃ=±y>=ŒYmÖŸõø€ŠDW˜x`×kÒ:w¶jõú*|¾g×òÿtKužÂç†ô-åLÆ&7ÖêuoÏ[’—å*–}•ªºÅÍ7ɼƒçêM›·îvvc¥oÓ-õÙSC·/Ѥ¨ü[WÊO9*uHŽÏ(é #óo£øø‘‡·•]{+Àߘú\CÞ­ªñH’ÊË,Ù2vÈÒvx>.´èVýú•úu½z ]7FÄ‚Fü°8ëEgûëä\ªÖÓPŠî{[<ûÓlª¢Cá'Í­m×Oè½\¡[õ—ø²´à+™·Œ%°ßütrò))ÅjjßþòîögÓ-µd=£Ù.ÅAºVÿøƒûY‘OêT†7ë,VÁ‡ò'F–?­“Þ¡“H­,ÀzÎ$7Εõâí«œ²ö]d$tF2ò‡”ðpÑy˜$Ф§¾-®Ç˜”C$³¨  öÃ*©ªçUòðñ4õT`V™h«6Âí~åŠS¢hTnN©¨XºDtF2s{Üq#ð~ì&×ÿ”zvÙ¿wÏíÛG µÍz”ã“€ñúqýÚHÏå|aý'žòŽ|3úðQÜC˜YñOŽŽ;ºî|éKTVÕ¸5s`åÅSW-f®zä}ß÷ú]Ç+%WYdäÞ^:ŠU5´=êR´,0üÕ{6½Ÿ±z¤ŒbÓïMN¦ÉËក¿5õÕA·ñgör¢k_ë,ÕwÅžû§"Àˆ¨2¦\«ÏhEÚÙœ4ñœÜ±zoüÌgè¬,ÕNJRB\\Z}œIOJtÔäÑ.aÿoïNà¡Ì?€?CÂŒ˜ÁŒ³4”»r†µK—£ƒ´H9Ú”6ë/¶Sw[*»[ŽíÐêZÛv ¢-TR õK¥cP‹$-rÆfþ3¹†¨Øòy¿ž×kç¹¾óÌóôÝ™çû|¿m°EÎñtšÆ?`zC”‹k7›b¾µÌš(—Ô¼³œ•ÔþôÝÛ"ß½ý]E¾§÷RO¥Þd?¸‘}3'.Aörõa#y5˩ܩu°]ÖX’AkáYQgŸ†š¾žã̃·-¶N±e#5øFoO±a–)]9n|Xšš*Ù·î÷$õqÉëŸM±£=‘ðÝŠu¦¼þ!۾ɲNi{ñnø,Ot”«üiòbßS¼åʆv&÷'7݃rÝ·jŸ+/\•gœ ñnëµY{yû·Ä™W 5“˽‹"ÃÎÞâ­¸qyöÔ\¢’þ…ÔÙ•rCàŒ’ëV­ß/ ÷ÏTåý±]'–Ýt3»NCsðÇ¿êRÜ·¶°ëyTYÏ•.žüK¬,VXµ¼¦+ù½vóµ…t-Mÿ•šo| KW+Ë®{J¢+íš÷^ÎÉíÛÿŒÐÖF=@êkc´èL™a(ͬí)±?ütH±ae鯺1‰/=®/=¥µ·—Hçá‘΄Þd7ƒ!UÉ»þhî=²ù†¡Mj™kÓŸµæG¬<¶´­¿“+‰ T¬tŠãøúŽÖE‡.éªL¼¸ýÑ|XT}kb _óNïHôôô*23Ùl¿j¬QÞB ]‡%¶Ïü|[Õ˜Â=~}WM)QîÖþ„vI~¿¸¿ùÖÕðN©ÿZ5Aú\È_©«¿@õøpF˜š˜Z]ý’BéÉHqT Ûìð|mß›KwŽníÔ±6g†ÞQî­\±Â’œ¶|=/ò™ÍªNÛ|oý ¢"[Ÿº#Æm™®j»xö}òÖ –Í[•g4/H]#üìš’á(óßã›bžùcÒŠˆG&*±š;Š, Ï‹|ÖáË{ùBàê•¢oæy}ükah0i÷¾ýÿ)õ}^*ŸS>,vv¶Â©è©¯¾¾¸ý‚ºú.¾{LC8¥FúÒÚ‚ÓåE4RL|Á5k‚²cq8EvØ™ûµn™¸¯}ʲٛ¾ÇÍXšo #(‹ãœÊ´æïu„×”ŠT~Lý]x»n7»Üì¼Ì¸–2#/¯ Ðâ0%§ Îœ*òþO7 sØëlT–ò?Ø—™ÙtNÌÄœÚîR=K­m =6ïyHŠçìeÜ“™ð•Ûæ–áXq1¬×Á%ôØŽà)m72ë_𯭪kh›á] ;óH- ÒB¿$E¿}Ôè1’  ¹9ólrñ”©=lG§åóÍ¡N‘·­F/Ì·“'*Ö™lå}«¸xýÉköY}í¯W÷õ.ÄRH±¯ï~ñ^_BÓ´5 ¼µ—ÿ––›r» ½¯)DdnÜ8nó†ÉƒÎ.ÿuÛC‚°Ÿ{ħçßé—UUT¹-¤èÊÌ¡âUFF~Õµ°žäï:¼çÃyŸ8ñr°"C{Ä'~ŠLGÇöñã¢ø„ ë~;\@ttÉdë1RRLÔM¤¾ÞCÌfÍIæ3Ò@Þ·P}=Gnz€¾FG×®îEÝo¿±¿þº‡#b Õòêº 'UÖ5È©Ë{7u/)DæÔÖÛ¶¼šßúkjêïžLÒsâ=Eh½%x—cÏ¿€“Î΀ñãÝzõ˜<ÙûÏ£ágÏVZZêÿI1ûžëè¨N\úz!UsUœèãŒg¼|ypϞм0ë§Ýz¾ÊÊ¢~.k×›«Šö¼¨øxNCgÚ´y½çÓM›î·ãô©ç'‰÷«œÑÑÏ´5U íPúàƒ05)/›±eË__úÀÞÝ¡ˆ®Çç=Å íáFFÓzÛtpXpñâ¡Ý»sçÌéÏø½x1hûŽN޶C†è¢& õÀÄT1šç­²mû/Ó§K SùŒ?ii©dtômW7y9­Þy„¦¦NC•nnØpÔÛKš&ù9ÿ»w—vüdÎüoæSĤPú>¢ó¢–|ëµíô[7Tžø±ûÖ1:)áЮ„JaáuÂcuÞ×}bZPĤ–üœ´çBÚãÙ³i$Òç7ŠéäÉÆÊÊ¢ÀÀe½ü@GŽØ³EŠÞ8ùslíÉá üuw)s¨Ø’ï~@Õ@êû¨2Ö›]î$à1õ¨¤òÌÜ\þ…¹§×ˬgÎ=žÍß½á°ù̈¦™e#­uïåoW¨X€Ž¬Æ»×T—…ÿô³ŽŽˆ¹ÙçÓ­È;’'NæÌuŸ%MWî+Çì⺸äɽ›N›JUUýlnÀ’RR„²³‹=æÎG@êû¸ž'}ß.òé,¼æ$ßq«ò‹!4³5­³y¿N t.Ý`%•Ÿý™Hýÿ~ujÉf\W“Ï6HVVÖKi˜™«ñuŠ][˜qîÂ¥+™9ìǵ¯úš–”SÖÖµ0ÿÒB­ýM¼îËUS’uù¯ó©YÍ%sD嘣ÆZ[3V}ÃØµ5Oï¦_ʸ’ÅÊg—=«­!“É¢’rjÃôMÇ™iË¡ô7d mo07o„†ÆNœ(ahHéÓ'?_rïÞ[^Þ¦ß|Õçž!£ú}À²œ;©+Wž›í.9TI¸O_‹«WÈ ‰lÿEóÆŽ•EE@êûj*«Úͳng="ä‡tØŠjÊᄾ¶sÙÑÛÅi—&®vM\Í{a|!5TšwËn“?¿uŠé{üî;¡nËÛ2Êm”WÌ߸quó‹ðôrcþQ€ Wè*®¼þÖS£¿7'ÍMMõ _áæÏþy¶oÄÑ>8œéÆu‰S§sfÎ4X¾ü«>}-Ô5,–/·ÈÏcmØok+¡¥Õç¾”…’Î4Þ¹Sìæ6{ùhT.¤¾OGvÒ·„W\Û‚U¶J«š_ªXØ[NúÂÒlŒŽº:“NzmgÚ¼¸B'‚¸°v’SØÍÖ¥^{®¯±¦WW×S$¸±ò'kŠo"ßNcVÞ=¿L•WXî: 
•¥-é./|Šqräΰa²ã­fŽŸ@A…@êûäÈž¿sfÝ>àf7+.·Ã*vêÜ)¢ýB¿í{7/0múê#ÓåÉ¡$#KmñLŽ©H§KÑéMsB>Ç꽪ë뉆††z‚¢@oiu©”tú”ÈÄÔ–óX·Ë ;ê;[ãíÂù\v?Œqo½EIsÝzQ[i‚žRÓ|b€Ã>»»nj¼/ q1þ“²/樚Œ£¾²<7Õ’åLÂ29a¨ðŠˆ(ÅÁ7`ÂÓöÏ?%ÙZbô²øWYI;}º8ÿa¹ƒ½Þܹ3>×kA¥)zxð~°3vî:Ã*:q"¥·Å¿'ÅR'ãsëg8Œ_¸Ð5©¯—%?MçßÙÎM¯Ê ³³¯\J>î\Z\jÖkÛ²·û™m÷#–¥”®´x·¾U„„ò’wúûú%ä¾mËŠw?ä*öÍþùýs”öÏyÃöìÂâjBMœ¿^í›ØÖqVZ”¿E”‡­'ÏZºÐœ‰æÀCg¨ÌŸÄ}Q]UúGÜáÜügÆF†£ ¢Qþ…¯g ^Jÿ{Ð ak«¯¾Rî?×BEÅhÉwFÜ%%Ž9VSûÂtŒøÈ‘>͵xQ'–qåEÆ•‚a*Œ ãÇyx8 ¾ õõBT]³éÜÉgK_ôEkÓGžUßFùÞ ¤¿­´ò«›h£¿ïð¿k×…J²âƒ¤Å«RÝü£zv˜Ÿ³°ñöVèjëÚgψž¯Ë,ªi‡ÒPÊÚ¾3fT*»“ícƒ¸÷P#nf{i£…'4£ˆIÛ;ÌozýìY^bÚ™œœ'Ç‹éë‹*)‘>ä¨Ožˆ_»öìÖí§ƒ©ffÆzú#õôûõµ`0†Í™ó]ÓëG_¿–RPX¡­)n`0!óËq8‚ùyƒX™O<(ÓÐ’535³°baÊ€Ô×[½ÉÂoÖútr$LÖîÞ¸M‡ocÒ‹®Šá´6}©ùmy»òuƒ/d†š¶Îæ9ßþ]Þt´|Åò l×P“0_½|Wˆa7R£´¾çŠHîÄ¿°æéÝØ5 ùÆ-d{‡Ä~ýç\2ê¼FJŠ9mÚ¼ÖÙ’’ÙÙWØ Ÿ>­UP$+(ˆ* bÈ4JHt/ VW‘Kž üý¨¶ ðùÇÏÅÅ…UTd4µt˰±´±Á‰ïÄÁ:³fé4§²—/=úß­[×ÙìâꪆÁC(Š äÁC0dþ¥»Õ"T ¢BäI1éÑ£šÂ¢çÕ †¨êpEMÍÑLe¦2Î:R_A5 8ê½m:_ïš¾#D} Â5`«ýD“áŠ2a¢ºìQæù“Q‹7¥¶ßWwºyW7ú–nýÍsì·MkÅ¥ ¢­)fÖÞý~¦F ¢¡”â`°±C¡é‰¬ò@KêÛ‹%šOÌñ_¥§d¶¬Mûa´^~ô騹M]ج`À··J|Ék:‘f¢çß6R…®Çþ¿¢¾n}C2]Íj¢Á7Z½®¶"¼ cØ—_r§×SܳÊ +ÊK*žWÔÖÖÔÕÕ54¼|Ùø’DHB‚‚"¢Âd2™B§Ñ¤©rT9¦ÁdâŒöIPpÈ]îôúªŠòÇeËËžVU=¯}u1^þÛØØø’»JP@PHHPDD”KBœ*AeHI*JHÐ$$U5œT¤¾>nÚ®‚ê…{̵ædò-ŒÙä³é ;©„§³øÇBP6AImëÍg^µ~Ñ\_’ú‹^Œ][á;evòeþ³²ïü-³)V4Þ@~ß'ñ|S±·éRv¬ïjlliwšíÁˆöèäxMÖ—\jnŒª»(=sÐ,=Ï-»¸Ð¢»ìI™é¼?ƒïÎ$@PĤ¸ÓàÁ8Ÿ7Qs'B gúYê#x]¹¸³8îÜ Oó._L»ÎbåÞÊ~Rõj u‚#&+K®m 7f´±©±*½“[_ô/~äpVgÞº)üL^u•…·‹É{ªÔp‡ÓP”¹7bïÑsiUbrÄãbBVkÒLwOç/é¯Úlú¸û Ûz„UÄ;IE¦º¾ãH‰·ÛôÆ‚ê29¢üÞåñ‰I¯ÞÊ­–U¡ç>‘ejŒ³6ÝNW¡c,º±X¢¶0íLÂÙsW³ïçäW7­£È*éi™L´™`1”65©ï³"DgšOãNn= FŽÁ¿;w^²¼žç îÔÅ®¢L§ íNÝ/¶UÕØ•;ùwçxȨzq'üs@ê¤>@ê¤>@ê¤>@ê@ê¤>@ê¤>@ê¤>@ê@ê€>íÿ¢d=6-ŽIEND®B`‚nova-13.1.4/doc/source/images/run_instance_walkthrough.png0000664000567000056710000047653413064447140025061 0ustar jenkinsjenkins00000000000000‰PNG  IHDR§}¾ç½{ pHYsœœ&Í:4}IDATxÚì] \MÙ?W½Ô ½PR– YÊhAY*d-»ìzB E+•-YZ,-dyÙ_þc˜lC™1̨¬CeFÙfZ,…B…^ôÊýŸ{ï{·×Û¼ŠTÎ÷“çÜsÏ=÷Üû½ß³ÝßýeÇB£…2ºˆ¿…´‰ÿ+r • ´ˆ8÷ØglgÅŽ-@ñ÷m¡acHÀË.k3 á¶õª+Rù»6Ò2à†at Ž¿¿™À¸ú]i`+ƒa}HIŒW툟Eü})$Pÿeì›i⢢È8^Ža*4’ "##qüvhÒÛø!Öú :<ªÌÓN;àêêê´0 üz½è/}áóXžYEwùT ~ƒa­“÷yظÄÐÏ|uL%/…£gíFç0gãù#«ÇP/<°Ç{cX6pÀñs_ð’è2/°ÅÈLf8t>üµ±·_2¸ û)N4‡ÇF††zЦŒˆˆðöö†a¥§æ–âºLE2êí½æ¶¯Cj2§WÆÏ_0è$¨vÞ&‘šk“^ÙØ¶MZL$Ž^å&ZÕÐäÑ¿Ù"r©ghY΀¿I ‚³'Äs©€yD I„¥ý\±ÈÔdâð@ßi Ÿ?ÅußóR¶ëY/­:RÄÀß×A±WéÔåÔ-`Ãó˜N#Ö¨ä}é¡‹h )wë0óLjJsCâ!Q9Æ+2‚ªiú†XŠ–[wðR_*éΩJlÇì —8V4·¯w/ ó µ;ÑiL ëŠãÿÑ{K3T7žUž5Nåàóx ¥+ ùH©O@íÒ£þK-aC‘»¡Vk…qÌB‰dŒœœò«SÊζ¦éuê¹Àaþt7»«6EÄ'KýZ²•TÓôˆ?Äâ¯1V¡¿ü SÁç÷íØJjâ‚—µu‰ÆÜ¸ñÞÊÊGvö±1®ˆ¿/†°-[G;¹‹_!ƒqçE™©ŽšX|xøz±H+«$I®Š‘GDòË*jˆ¿/IòhˆQø‰_"I^uª\ «Ä}C6þ CŠ'nÆ8&ºù(÷¦ÊG-ݶæ†ã±Ä©·Yô•—Û¥?bì†y4)þróŠõõXßêð§µ:vþð:Oµ¹{ÿ¡Å œéM~þC†nwHXÎÑ~y™e9.TdªöNúyS³1TØ¢o¹üÓÙ SùöúsÀ°jb³WžéŠa­‰É\Ì'…å¼ý´AÍ8¬M* ¶iE…C®•- Ê©—Š-ìDn¸AM*Yèõ0Ê­ Ì~k`¼—•ƒßÚZ²$pxú[ KÀ€Dr:Û ÃD¿L½ HOI1_t Ïôdž㿚_Úd~MêصgÍâõ`‚‘ý{þuûG_ç}/òLÔsŠ÷p˜^­ÞÓíNöwHµYŽƒ?)zêó· ·hþ(ÐÒ¤éf>> ¥ÿÉc»záx)LøtF„„ø»9…'ãÅA¡—rÊ’OÍ[n^ ÀTÈÊÎÐЛ©©!!!SÊŽ±À\øŒ^Æ>¬†s9°1;OÝÙv(ŇäXÀÀžíæÆåp yÄtF›Éù'Y`!}ö$ç“ó&ºäfÈbv8-áñ(è|\1â91‰õ£-he•7BOÖÅø‡CòV,t \ÌЛz†pfßEÛ×ø-Û°5ëv²éØQ’GQ|äW¾/|eÔ­ùÃÄ’E„LvtÜ¿0-ñ¥\¾{Ñj<>‡;·A´ÜØ(ú餧Ⴣ‰€9Å®oÓy:~Üź \.—»O2¥ÃFÃÈkkkÉ«zöÛÛZN°ïÉ#¤à1Ð9ßÏê6<y4%Q‘½¼µžR‘BaùÇÙÎÎêhþå‹a±Ë<*~÷~ïžÝ”•å] —÷j*pïÞå^½†(=Améû×ïÞ¿ÒÑ!º¯*Í¿ñõ6Ùùk³Þ=O¬y¨·hÿÈe~uþ‚ƒƒýüüÔÕ믒9|üt›ª®fo-%%%Y‰Ÿ=éÐQEʳ¢>ôWŸäÁ.¨(yw Ë¥6Ÿø%ÍǪ“›‘Qfbâ‰ø`æÌ™õIž¬]—î<²35›¥ab¢¶yËúË×"þÈñ»‘Qý\ÉÅÔ‡mõ:ÉÚÛF§£˜Îädµb¹NãÓ53‰aØÅÜUËÀÐÆÞOôI?=àÍèÁÄqÜqˆá‰¤ó1lÇ÷Ü•)ÀÕuÎëØ#'€­§›Z4çWÚÎÓÞÀ01'nr¼F»Eý #‰ËÝŠ®ùkbC<]ÛDÇžœâê bcá±8~ù‹\‰ò(Dnñ^暪øƒäí\GLHþ•õ6‹4©>»7w“H–ÚjNæàR1ˆ¿œiƒMÚµò›˜-›Ö_g8ù¯ØÍÔ]s6ÅÅytÄ4‚.å–PäAlÝrÂV¤þă²ÌòÏá`±±_¸Ï9vH=™þüªY{fúsñû"ßLGt²N¿ùÛ ±ÒkòÂK§´‡?™â¦5ôqÒÍYë¹NÄè† ëÖ¬Y×ÈêO÷u{áŸpV‰“5w"ç5¹©C+ïdœú¥æ*ð[„é2Š[9Gt“‚~ÿ8>ƒŽÿZÆŸ©àÀ/ÿa1 °b*Î× ¼žJ¼%øPòÖ£’ý“ü€œø=[,)ù×s\'Âæ¾6µõôç¿ <€ ~©—GDÛBú`Û.8&I"on]ÝV 
þ¼zõªmÛ¶õp%WN-Åq˜Fö¦WÇ‹‘o-uvFøµÃ'±øs¿\i„ü©w0>>žÞ\8,$ÍÈY„¹ŽÙ³êÇÛ»FÙ†Hät›ÎÀF`þŸXå„JCñ:ݪÊÞW4M] ßþ:÷Î5ÓÑvb‡äÿ›¯ÛMÖ¢¥¥»`-Êd6ç“åìÕ«=ê¿Ô7äÛ_O¬NfóK„þßÞÜ5Å7ÏÂM&S Öج H 6€›ãÆ/DüÕ+jd}ô(OÀx¢f±‡ãã躴‹aTV¶?|À¡:íÉbWtsæL/Yó×$yhü÷-`gÚ-,|óè¹Kå“'„ë‰[§L‘ü4¾1™65ûÝÁ{×ø3 ݺuéÓ«‡œÄS¦ø z›çöYöª¥Ý¥Ñ]oÓ´¿†ʬG÷ß—•ÉçO0hâ=½?_Mí’âOä4cýM¡m‰Œ1“ø zéË€êÑȨ?Aff´±±š, Æ¢úSÚÚÚõv1弦ȃÔùkÈÖÿ=26î €Ø¬š¿ÁâÅ‹ëçJÊÊù*j2ÆÄ(ܵkÓ’%Z±±W&L€üâ”S,ëIŽãþ‹ÂjüÙbX§çãVþ 'ªæ\ª›bY¾‹ÄÃ7Š'†äÁß„ÁGà<²ÕôoÜí_’ðöŽ%®—ZUòþÞÇì³|HÂÔ†L>ôïàýÝ|*Âðkþ‚{]ùSî ]7ÈÕæ¯)ǘ|ÜXYàž÷÷öòòU"CðîsÔ{-:õ¬êÔðD'æv­ÅصgÆÜ|;¨“2e¿{d“6$ØúóºÃÐ"%uõR*V‹þtlÇŒéKký)d¢Hp¯Um1s…f»Ô¦RÈ-æe ™+,$œbjii‰p© #YZZÌ>˘¢ùóy…Å¥0%³'¼Un–ëdÔk±Áòaö À»;a$KÅôüµ^OÓê×È ÃÇŸ7Ë›¶ÐçŠ`æÙ³â&Ðþ)´‚>«ªÕ|H& LÈÝ—½˜ôs¾øÊ~€9§(ûÝ„Ô'®œ:bºx¡…t˜x”Âñ&ÁwZZ-lQÿ¥¡’=fJâqÜ’Ã={Ø þê5š¿†ÝËâ›ã4­èu †á¸ÀˆmPÎêEf#Ç룦¥¿ùUI¯XRÙ‹‰þµ/õÚìÈL'«Sâ³Ûäõa#‚Íš^MŠ?+sS aóðæÓ'8H&Î ÅŠ¶‚áá/ׂlM°ý“š©4òD+RÑ ÿÆ6Dþ¯7šÚüµdýùߛޣ‡ã\€íˆŒ|éí½ñ÷mrçAKñ¥âºšZ*nííÝÍ_WC ÄrP_’䉊RŒB9ö×|>‡ÁpCüPQ©'ßÂ5³¿–ë?„ÁÀ·þênôûëàm`23¯ qìŽNܧõyU2ü_\¼`ž¢OÃßLMG56þ¾ý5ÛÉmîF_ÚÀd‘¡¡??ªŸ‹ùœÿë*¿’ëμòÒ¹ngu.•œÅæ>ÚÃ6Zd¾60m}(üM ÖON¾ÝùûBö×ܸª±w=¶òí¯ ³2€ŽðÍ?~³ZmÈØÆv;Çï—ö] ™Üe˜f–*ñ»ï|^kMÍF\6RÈ·¿1¨êûÚuÑÄ.øàÙsGÛv.öٖ”æ4åû÷Æ>í¢xæàÁ’yó4Ðø½a¡´¤H]CS»¢›óæ-GþCu× ß`àdT6÷éÓ«»")•”•»i÷²Díß·ÇÖèç»À€n/‹B8—k}âDø”)D{9b¤²È2ô¨þüFH¼šN‘'>™"Ãþš"O2õ_¾p×í"ók1  uv–³@,ÿÕ7Ä–½•¹ä¡ö¯A">áwG‡ &þãÿ 6ñWßÈâNwô?›þœ·¬=s;éÛ78œšWëj1X2½ˆ ¶À›2gJIùoØ0¤¿zGö±t6ØNš`S†Ø®;jê2_¢ÕäH ÏeEõgàòä@ßÀñW¯¨Ñü5édw¿¬½•jJäÆ4-û¥SqÖ“œd±[­ÙSR"]}¶–šXIÙ õ?¿–.v†0øÇwf¥|Ã=kV@¯Hù³hä?òfÍòjD—Üôí¯P“•R©)6NŸ5 ÿˆ?Äâñ‡€øCü! þˆ?Äâñ‡øC@ü! þˆ?Äâñ÷=ð׸ÜÍ" ý!þ0¼¼¼¢¢¢à/ Sø«à±]RS³ßg££!aÝ\Â\S¸Ù‘ žr¥>›T$¼ruч÷ñIlÖ-üj¨Øýaìx( ˆˆˆoïÀB>Šˆ³±5Kºœi "å(wo*C O…ɼÓðpÅ/:–ôX÷¹Q£©?©;nÇU<=%Ÿß×Aõàè€ú„ÿÈ”°‘‚,:Oצ`XLàfNäàúÓ±3ºÖÿ•Còà¯û8'2Æ[þ!©©©Õ’14§×À´ð'©ZŽ1Ž Pu€¬GþÔ“z‡uìÓ€r:C…c1ìK‘w3lÀ¬Øòìœ4Ñ’%¿¶i#µ: ÞÉ‘lMÓ7Dý‰®ï&¹Ö›¤ò$ËÌç‹À2àzv ¶lû•íË>0ôN‹!p, ôjÀûÅ?ñ†qc‰W3)ʬ{¨Ñ3LØÀðäDkvv¦}p7W… Ì|ÿLÛ:ÆPäù_|>¼Mñ­ šý×R³q‡TPsDâUWÒ6ºT ìXô¦Ò—Œo«ÛëùZm7üsŠ÷.}‡²i€rî‘þ\EBò„£º`øk­Aòo“mпï×oMº™´àgœÃ¤ÜOž•¾CßÔtä7æ/åØ^ëé Bvœ¦b pÜq ûtþåŸB=£Ësn”d\p{æ³q½ù?Ô«7Ïu¯Ÿe_âLiÓü• Xæ“@%ؽBBBuèÓ±=öEÀËÀ>&DÙÁ‘ s¸´/t÷¡5d;¹¬ì‚éXÁ¦çæÖEw£¸'$/#;3­—ÕÊÕgé–-}{´\¶Ðe[øB¢ß©?'pxµ.FÉÛJVä¤ ÿ#˜ %À î÷‘Él.õ6%wÁï’S¦ <ºÝjæ¢çLºœùíõɃ¿ÔM‡ð&ßiÅŸ ^/8Ú »Ëz;‰ã9$7¢0›à+š@l!G.I© ÷@ wªïàâ#šxºGôéVëd]ÆêU«` Gùiu[¾œ¨QÇUíW­K¢Öœ Àÿ` rØ2Ÿ+ b0ðNá„Z»­¼Rͨ,ÄÇïã÷to õg“D;ì=íM¥¹ Q„´ÑoŒS83+¤·n}áç§#;oâï @ŽÿOŒÿ¡}G-‰qºøBŸ¼±œb×êà ?¿µrKš‡øûŠ&Œ^ -¹ ““[¹ºI»þñGÔ°aÕzO8®Œaó¿í%7µú“äÝþ;£o"F:yUõ䇲·Oó2»u%^AÛØÈL:l˜`çÁÔÝ­ÉN2jÿ¾(òªZ+Š<A‘×pÇ_ÁÁÄ+((¨Þ®BDòÈÎH<|°Ì‰ˆ¢Û,Í´ª)5N¡«ÛªF?~ÿ‚€Ì:t¨Þ.æÖÓb£ÚÎÐÄBÆâG_¬ê˹ºi5®Åê£þtvv®Ÿ+ÙËý±ßÈIRwI£PN­_õ- ‹<ÙÊÄ¡C›W4Jþ¼‰æü !Ö`eJ•‘ H úZ;Ž{¯qøUNøð¸Ä;|ü—Ûo­Z‘½2̦ãû¿ãaÃbooLYñ®·Íê«Ú›Ÿ¶$™GœJ¶rÒ¿Èeüvå¯v]{+˜8..ÔÉ©Ü:ƒÕøôï²õÆ?~ò Šæ”áøåH‚­Üü\Ø£N”m9(Âñ}ŽD|Vò϶¾i—¼â—ïþ;qi'*ÍC°œã7Ë*ÞzÕ•é£ù?àÛtÚY5wyt¯÷¥/æ©¿°°°iFzd“š¿ÞV::ys›1ëÓÉUDç¯iòrÏÿÏÀ>ÙÉsL\ôy0g¡§^NTØÔÆÚ1༠ÈùOjn)©ºa§K<î"ž¾Ú7ñYD|–„h ÷1§¹ñ«Ø¼–¼&ŒúŽÄnÑ„OÑ "Æm¡/)mX±¬£ôÇ̆çúUvG®®(`7ºV£‰´î.óÈùëå,wˆ¦£É·B8KT|dóVäì¬)|¶8(4møû–ø-n§ér?aóæ/·ÿ ²²út邸«_·QÎ|]!k¯¿< ûö»,”ÙIéÒÅ鯾Á`0Zzõ¦™V$G. 
WÄrÂ\Ý$ׯ‚½³…¨þü6è¤Ûv_âÏ$tµ$ÖÎ|ùï?£¬e*ÉÕ˜×Þµ+tÉ¢;zö,Ü8wÔþ}{8Np„t>_ .Y"° 7õ_$Ü¥K—9sæÔÛõK¼ÔÝ| üÊ“·‡Él&&KÄ_<==Y¬ú›Ë€C1ò¨Èsû#W¯Z)‘¨ýû–Âßÿ?IMû{ú$¢ 4õ–c¥AÔ“÷î]~ö4wä(gž;·1÷?› n¥ß}üï>4ƒñùƒ23oçå•Õ8Ûõ¯š{pp°±±1Öç< üíi5 þ‘¯s+îݸ‡ÙÓ Ã†V¦EôCƒÜcøûâ¬ÚzTÞ°a㸅¾´î‘ILbþóNXKS)ÿµju7%(Èf}Aã5”Æ_+[ 2™)”…&õòAäžjRaÊÀ—÷÷ÓµôÞÂÂB2ñnê”bAŒ––•!$Üu™ˆÇZýÖà8_XYµõèÚ©;ñµ$¶¿¦Y¼´ëàÄØ‡tŒªZ3•æðÜ@xÿƒ Á•,’Hª ü¾úŸÌ>(@žD Å'Ý?&Lù/ãS%Ÿ @ÙÑÆ”øZš- >»~¢Hâ«WK­ô©3è*´°ðY¶–¨ÿR¯˜7sªìÉ3u1ûëAƒ¼e½§˜ãóÕ Ä_Û—¡a”“l+Ì äˆuÖêm¸ptöÞê-ƒá„ô÷ ’%Á3œÍ¦ÕG¢IÁ¡P‚¢­]VlwKl^?ödÀt4~ÿFFn:mX¤©ôi×ÔÛ;,ú6'[>O:²‘×ëOïeŠ'¶è»´±_/òø“àààÎ;×›  áȵ¿ÎȈ61Û‹ì¯E`ii9zôèz»Ø#ȵ¿– ˆ|ñÂFG§â@=“'kWì¯ut’@ü50ÔÈÈwm-¼=Ëk5®øW_½1Ô÷W]­ëð±“}l«^ÒÊ·¿¾|9bÈ’ñ¹ù<}]&øÎí¯ÓÁÃÙk2yÎ͈ÂñmégbBƼûª—QYù (lMÇ/ õüŒ'z½7‘  šâøb½½p (´m„õ籿6³'l€ªÜ#•=̾²EÞóÜG8(êÿº¤rב¬žOô¶7d”}àdÊï|HÌPârJCÙ?òQûW¯Xµr¥âö×>)Ð] €`Ø%p2èþù|¶æº ñCÃãôS¡ým-ÈþºÞ!Ïþ€jÆ/†¤!=lmÔé¯ÁPøàúÅ“ÆÉ™8…¤ý òÿùM)„¿ç/%=ÈüÇ{ÑšN'¿žLHØ÷ôÙ›En„ÃHÿFv½M³ýëß×â“Âc8£níZ¶Àé•65þ„õ§’^¯¾T8ñPôÊY.è‰ú³›ñG…OžäOžìŽøû¶äUƒ½³gê³wZJ%O “'3â✜ ‚< JÊÊYOžvéÔQ>yœœZ#ý58¼SiK‡)ûk¹hŒö×6nçB¬{vù—³CÆ“N¬àÒ½ii߀èݤçú÷Jú´i÷›†&ß#¼w·×µì»†Íb|UVl»Äì?é× fºÍ̧ÇN›Æ! ±{ED˜x{Û!þ¾Š òé0Eá69ÁIßyÙY …!ÿÉ ®.ò¿«&Vâó>yþ“++T•” Š¿|'û«l¤¿oÀâö×r毑órÐô毯¤êví%)ËþšÏç0b3g®ˆ¼&XÚ®Á`€Ápkì׋쯈?Äâñ‡€øC@ü!þˆ?Äâñ‡€øC@ü!þˆ?Äâñ‡P+þ¾ª[+T}" ù!Ô†áÄwÄGe—1µ!tÁñÿ.ù¾ùºõª+É)–±r•ȦŽß–“RñžµhÉ%¯ü+p¼œò‡K9É%|ìÈâµgÔ 3bá±›a#­'²âó ó µô´ë¿aXÈO)Ó‹ŠjíÙçÁcuÉ:$dmñË監÷ž»òÂf\{0b7~a!Ë^aà^@9ޫ׵ƒ‘åÿîQ鶆0,q摈gs¾¶{ßïE~éû˜» Â“·\9á§PÅ\|uuh™wøð6²*×ÏróUžü¦`ÄŒ´"š8«†o­½c** Ã¢†{èB{•0åž8þ†>K°bw50pWE|û¶Õ¿/¦éêÍûðïÃR*< 3Öݸ»8I«»À½’yŠ¥ápã%÷ʨ\Å6k/ã^ÄF·Áy]¢—O¡6#m0juor¨"¼¢7Ô< ½\øg+b,wîq°C'±ù’c®zÓcó`ÀÀã—¥žKÑlë-$?„ú½ú (¹Ž±§ÓGØ—†q1{¹Dßèh¬Mz;üD_À•ptÉvtäŠ,¾"«fô2Ç¢Ó«uÑa¯ž9”í=Þ[x.¨y:ÛÐSÿNê¦xÍû•òGòC¨'¤œæzmˆ¶¶›J=Ó7Žmt°w„a[‡isÚÞ ŠˆÃñR[Û1ü¤$oan;v¾{°Ç4ùÙ†ú{Æ_¼â¹ö>qÜœbmãµ"¬))IÐ`¦%)’m`xt Èf|²à¨¤´T@/[&<ËÈ*áçÄûkç䇀€€ä÷Ýö-÷lÍö‹¿òËnóØ £Ö9—ßùýâoŽŽš¢‘Ÿ>} {¹reP‹}ãú>%åwýúU[¶¶´´òðá7K–¬B´"ù5hì9xÄj õrHjbðøY™¯+€àü¡íŠ.˜³ÑÃC4X@L{Íš5[¹²=íR:㮡IïáŠûÑ£íFFª0`5þˆ/­®®´d‰yeÅ|%ee$?„„›ÿ±Ê ÀßÌ2ìòþ¯¸ð%K×ðy€ÁäñxÞ&êœlð xLm¦0›ô´L3sú@c>ÔAî£ }#xTÎËRcV~_Äçå”ëë±óËìuÕ¨“Š”‰ÇÌÔb`£96-mƒ™™¨=_uä°ˆ¥áàI‰ÌµD/+÷W_m›0&ä–êkI|Ã/â34a ×ËLub“)¸&Ÿøðrr¨²=ÊÉ72Ð8ÍØØœ¸(áÞܼbx9…ù9ZðF) θS¹Ö]z‚‡Y{fú“BÀP/ãñ UÇf·sŠ÷ ãÎí5a€:¤ „¯­ÁÈÏþW·£.L cZêt’Ì™Ö7fÒï7)£êhGÇÃññsµÍ”™…=Fg/v¬Å\g';ö¢¹ÛÄ[шˆ>>«%3§´‡uuÃÿH:=-ßÌ\`Hkëÿ )¼ŒwÕœ¯°¨\S“¥\/€ ”33󉔅YZÚ]¾wù]Ú²ŽvWXMv%LœÃÁ½ó8~ *HÃ"¬w2aÊ@ÔÕÕíÉçˆhÃúâøí ä×ëmGß_ÿ™ô yã }Ž\âŸ$^Jqnµ},L×%óÓ̉J§˜Õ’©&ùí‚á‚KÙû©EÛ_@íÁlƒmßFËgP9¤ƒÞ¯öƒ5° ˜ºŠÐØb.Ž¢R²Ú°˜LƱœŠé¬bîú³´â[XýÖÀK3 ‹<õÁ ˜Çù¹däà·õéi•ï¸mV_…×Ò¡å›B K˜¼KÔÞ”Œ‡øéùüius®ÍU…§÷©ÿLÓ‚JØ…èúZwwÂz˜œ¸AZš-8낳Ƅ µº¦+Ú37“ëRª¨2åŸÅÉ}´“Àˆ[jv2YêJ@«3ÜŽâ š îžY’––VÈÉjï´ç:ªu¥›V¸ µG%x–KäÁÔÒ¼à<16ˆ` Âñ`AíVV‚Z?`'Ô^•Ád¦?þtä>v iÉ)Æù'ÉúýˆÀˆ6Á#ˆ/ ö௽5uÔtá‡õàÁ7”!›M½wæe“æä."ÛyE{Øì‡0ðê½nâÉ0*1¡¥~kD‹4e鮸í‹Ùlêaär +üéÄ%³Èôb»D/Dô#º8~™Š¡>oe¶5‡k Lß%r/‘€QCí Åà奄C±v/½Ì«Øz¯«¨M²¼T3è2rà¤s¨½©&ô3œ'Ñp½ôñ©¶Fs¤µ›w µP,ˆ ¼S"x@cíÖV¿•SÜZ<¢X³fÔb¼äk·#*Á‰ÑëðhúàT$ª6¡ö„‘ÁtdçÎæH~Ÿ‡ ÷oÅãOw׺p”*$¡?awÂç‡Ú#~ed"—"ÅøèÚ¹ã¡c§LmG×ôÀ}® B奋Ì“Làã³FlyJ¨=Øó Z;YKô5TM/¬M†øÍ0›0ÔËó´”YÓ°þÒÏ®ÝÎ]þò³ŸEzzG334ó‰ð­á<}RHX˜½sm&`îüqÖy–³O×ÜÇ;ô;7¯ª€â犥€Ú#º ÑáŸhüÑ£üÖÊ=¹k­ø¡l¶™™úwÈ5’_CÄÊ¢•9úË…žý­9üø®OgmÓYŸ1¹Öï¼”ìòíÁ°f æ¼#æõRÀ™3IëŠWV’s˜ŠâÐá÷Îs}T¿S‹k$¿Œ™ãGRÍ[#GÎY$5MÂÈU+W’Á<¶H(­MK=´¤ Ý•ŽÄ½ubß /õ¨A™Éww‚É›³g£Æ“2 ”ŸßêÍë¶”%óÜïšb$¿F€~2]1˜ ´WK,õX%CŸÀ‰]×bç%5^W—øC@òkLÀqüÇSçžþ÷Úw6œ=eB]Œ­EñþíçO=~\Ö\ëýC;;;Wt·‘ü>G=z‚‚‚š$=‘;v:ŸÞ48þQaÚØ⟫ÙSÆÕ(ç쬆]s0-Z34ÐAÙ¼åÅŠåk‘Nü¤ÀÈÈþúùù5=bn?}«Ì`ˆjO~4ü΋2¢/ªó¹ ^ 0bj„ÖÞgú½Ëu 32ÊLL<‘ZüÄÑôÚ½×oKŸòš)תc EøêQêp›ÁÒÛÒˆ ÞÕß¼+5²IDÒz‘åÒ§Ç™Šå’å°îào<‡…aCh« 6o¥¬YNE½”½Xçj¯.‡·5²Ÿ•u¶vÚRà×—Ÿ­ˆÉ%õLcØbî“6Ÿš˜„Í' Øþ^ÇÓ~ï€Ð[n1m0ï?ïcÊ=a$~… Øœ (÷ã¤q&¡ 
ú€Î…èˆ0EJÒüÜÜ\ƒ®·éÝt–ÿκRÞD¦Â2ÄÎñ»%§$#ˆt‰fdd¶úZ%ë-üÇêjÆæÁi¶Ú¯lq «igŇl‹G‘v¦ò­Ëm9±öIC}òö:Ls ª_›-94(™ÿ«•¡ÁÿΖÃþ1†ûw ûªètˆé¸F¸~÷ùtdz¨c Ç$­6£èæ–ÏÕ AÑo¶yÇÀxÇ@0l73,ØP}¨>TªÕ€êÕ&Õ€ê@õÀPÅb•”2KË+ªkYÕµµ‚8/H"EE)" اC¨aUTV2+‹kªY5µ,‚Äñ*…$I¦ÒÁ> вªLffðjVfpÁ …ÈB4*E†H#ªø“*ÀhfþûgÏ^¾ÍÍùøþ-Vñ¥KɈÐ%Ä$¤È"¢T‘ŒI;"B òD çÿR)úŒÔ0Ê*JK°­¬äSÉÇ¢ÒOJ>bgÉöTPUUÑÑP%‰`aŽ•kŠ?=~‘ù0+³èõëJ¼ "/G””"ÈH Òhx:O¥â©„ڜ֨­­-aÔ25>VU¾¯~—ÏPT õQQSR2&‘©`ோuYé³/ïgf½ÏÉ­@P¤gO¢¤$AFF.ŽÇlN¥àÅ„±Îþò¥–Q\ó‰Qó¾ º¨ˆ•—WUYYÛ³¥Š¼ªŠ¡„¤2P}@§¦ŠUð0#=ùþçâòª}å•Ô{È÷î©5Û¾5(¼^XLÛ${*}}ôq^i^öó¼WO?,RÖÐnf")Aë6ªãË‹Wï&¾ÊÏÿ¢£Mé«ERR"H þf ÐűMPIY諃9•Y))•˜2ÑÒêab<–.Þ»{êê¼¼ÿâf¾ªPS#ii“ÔTIzú8=}LÙ‰}k`BBÒ2ØFÐÔhr¤ E#³³YO2*ÓÒ™22ä!F†jêfðTTðû©®A£î=J¼CS` Ý[ͤ—ÚϾ(‘LQê«‹mÜŸÙeQw#³ÓS¥dåÇX–‘ìñç)½´´³ÿÝÊÆ›kjÔÔÅö5Id== ¶q~ùËLJ*“‘¦ZYMèñg·J¡5¯s#"n<úR…Sô§L›Š‰êŸûe‡PRÂ6›1\1ùì鳇wâËÊÊj-‡h대§  ú€_ʧRæ•ðëùÙ/5™*öí?JµßoŒ Qˆ¬9ÐÛ°ý,FÉ¥°cŸ?ZŽ¥×O«K™õ¥ 2êdzZ©™¹°E§ŸôoŒŒ€™™0¶aû¥e7Ξýœûºjôí~#ÿ ±Wy'!8>î}¿”aÄ$ot0ym»/næ;éqqŸû÷“1b¶  Õ?‹*VuDÌÝŒw´,˜[a[g‹!EDÌpô$¶dªªÜw8´ŠY:eòd9©.¥=XÉÉÁ7£òFŒµ±¡b[g‹ ˆ0~ÊöL0,VöÙ3¾ï‹j¦Nž,%­ÖU‹5Šfg_>ñ±Ž6eäHacc™ÎAcc*¶a0)éÑ·ËF4`€¨>è@>””8q‚*J`>ZA{`ç0H2²žŒíyyy 6±°nÞ #YÍÊÙ²5ØÐ€:o®DW4²”4aÕJ™ÒÏï1#›š*ÖÙ‹ ZÃÝ'€CfÌ=ªë-B$ Œ-Œ¢µÿnÿ·‚‰®vs!‘Eáa úà›a”W ¡ËÈÙÌ_ÞÕÓ",&>ÎÁYVâ³m‡éСƃô:‹Þ«* =~PBBpýzÙ®ndQ<–Šòò²]»6iŒï”‚¯úzdÀë×å˜ÀÆ´S—68'à¶JÓ~çÎí¯ª"LŸ¶/BÕí¬£è™°›EïÞ˜MœÕ,ÿ˜tQ„ÅFÛ.ùXðÆÛwÛÂ…ö¿}½‡ø;ûž>)^°@\@àÏ12•Š_¾\ê]AÞ¶7þ=oNI¥Î·Üœ«§N§.Z$ÙÛ÷ZÑ~S¦ÐX,tß~_Cƒþ'€ê€6ÈÿXr(h€…•:gVÌ? ™^£mÏ\8-''7Áfôo‰Ã—Šì}ûY޵³ëñGYV†à¶Jæäɳ¢"â6cþþ¡•ÇOøÓÅñ«WËü‘'pNN’Éɯ¼ì—Ã`?Õ-›œž5bæ¢?»«'`‹þ¦Ñ{þúä½¹qüT’ó2)Y ;œBps“ÚxÙpÐk]}XÝ@õ'/‡©¬4´šÔ­R-*.5|Ú‚½{÷,^â(.öÓgAŒù_aQéüùÝÊÈtš ‹‹”ŸßaûytñÞ¿òÒ¯sÂÎ_LY¹BêO9Ù&8œÀâE=BCÒX5¬Áƒÿ‚‡€ê6ç#þ++ý<Èr\7L»EØbòÜýÿûŸ³³3•òGC%%|ýöóìYâmø«þœ•Vðú}%ö‹,Dî©ÕËȤ–\{{H2’®Ò ¯!ˆrÃ}¸ÚI`$] ÞÀBŽe¸›~KÈBBNN’G‰ˆHýšœ-,ˆ9u6Ùe¹´€®ì9¶´ƒŸ„Èýú€G€êº;Ñ÷åegšMœÓm-@¡Œž¸/(hårçŸt‰ÌÌ+’ [<¬œ$ ¥ÃYì]ûu–ÖæŠÂÄêìÇ/B§ùÛqëñ![Cæ´¿ôÒqÇÅ‘¾ãLaaüüyA+–{âðøŸ§_*Þ:½t©€ë¶{ÁÚŽ‰’’½då4àA ú€îËDzòøÈ°Qsws;HÈöêÕGóÌ¥kSÇwüÄ÷ÕUŸÎœIuZÚFWzÔc¶ä3™YgÞ0ëÈpí….ócN¿ ÔkxÝçÇÆÚÍ;žÝpº}ÀŠG5¾éJˆiWÃ×ι[ï 8,öÞtSI^]Qbì¼™BP¶²9<ΠÎ+çñŒa{Ï×{P¼ÞVèGì -C02¿xåÀÄ ‹~v†ž9wÜÚF “šÝ¹Tãp¶³%BŽý5JP}@'åÒå«} ÌD˜B} iäñý¹oòzÉulÈ7‚MDDÚR ºãŒÌìÄÆŸ âN H_ûõý¬LT÷—’“$Ê™ q1«÷Xâ=ÀÍ󂘎Ï~a¥È®”œ³ÁÎiGÓàÖ‚ºò:îNgi‘1W}iõŽ Ûn™Í{h%W‚â°Äâé4¶Ìc¤& × 6” ›t`Ó¹…R9gŽ*M»‹¹oÝéaJåˆÀdC¥ 4…±1áÿû››¢  ÷ó²2=í4‚ ýûQ TKI jkÓÿ»}ÒÒr6X@õÝ‘¹ù ô‡Spégdùßvó:²³kIÉ“'OJÛµLœ¤v ˆ •Qig¯¥ß=äÅXù@¢ÓB*#î[°!b¯Z+ rÛå¨ Cÿ]ÊóÇàý›0‰#ùØõ‹‰}‘mYÈ£™%Ö”g±œëiRŽD%p|‰‚ZŠHJr>ø1c¡ÑÉ=lÉg¶u-WòaõãÇPÙ#yä§¼|X\S×üWM¤÷2Õk3•#FHDýwkÁß?Mõ¡è­è—Ó¦Ò¡<ó nIðñÉ27«€EüTÐIH¼«ÒOìPŒ²æ£„èwï‹d¥%;*Ì»‰ÿ}Ó²$-ËAضa×üz§ÔƒôìØº"铉ܗ~U[áT±½´ZÏ×Ö’Ö«ýRÅù¦ô¿eÓÙòNŒX?t¯²º± â*X:Ê7•æ˲*dÀP}½m¶¯©ªâ"#«òó2ääÊ‚r/^\ÅKK MöåÍÒ‘ùˆüøDL·†16iëR4}nHÔƒZ_íz÷¤—üG]ì>Š‚PSBâ³c7ÞJýg.¯–ñÉkñ~u:Õwñ÷Ém-ñôôû?Iõ¥§¿ÒÑiÚc™™“ymC]9¯^»Çè£>\ïG—m,z™y'ñÍÓWÅŒ/˜–&ÐzÓûk+ 1“ý½<~B×®e›™Ác@õññ"t©ºmw_w´Ã”‘ ˜êcäÆÎãÉu4]{+vÓÐNoVèd¢íùÎP±¦¯¼W¾¸¼ŠJ£uo+qßÁÂM~Vu+¼ $ª.Òu½“²cÈO½ãþŽä3+Œ¶–Dº!´AÊ‹WU©4ÊŸw§w=žf½& ‹…Zl zuy‹Ëæ(ìU0ÆÎˆ'µ*©¡'žðŽ/¼T°[¡}³÷‹ ðLxæRM ˆp²žÀé3Øz)à÷ÓäôŸJù^/?è¨Ð>~x(€c/X×¶WÉþ™¬ ÞÓý=ÏŸ”ÂlƃµMú¹qœ›=CÐMÊfÿxmóÃmã÷Ñ7,©µʨâ6ÐU6zÀò; b!xØú8íÞƒÛÝø*V3­MÙkK˜z-?Íð™¶ûžáÞ×~ðÑ­¨(x#²ð'å`n.ÓbXS™vâÊ„5ϸûö§·NíÑäè´5ÏÌÖ¹ÿ€ê«>c¿zÚ¶°T¶ê:½ï`9â—b®%Ø8s< {Κ¦Æ.Õ¡“m/|Ûº¬—ÑDµ“znɾŠß¹ž=Ñ¿TV”“ÈTxô¨>6áî:6¾éØN@r…£ÿ§2ßýRpBEvV^ôÑKÏ Ç«³+ ù÷‚ì Âùƒèo{ñô~îQæ£ÝÔΈ‘O”K™åÔÍõ^f> üë­ñØóYuNCÜŸG{s¦+ö3wCÜN‡“ƒ{…çÖùPq;yÕwº&g¿ØÛLÜ3YóÁˬþÉÎ;stÁûÑM¶p“-éìŽ+[ï–xŠ"HÞÁå ìüoðEWeYàIûA­'å ÎZ¯Ë2À¼÷/ÕÀ â¶¼Úfê¦?£6\¹°^‹]A)ÎŽ˜é¦{±k[¸PuÞÁ•#fâu:`¸–³Ì½ÖT§Ä-ôÀ»9vìQÒ^…ë0ÉPt/pÞŒEá|1Så~"ÄÛ@ª]‘gÜYÛqVúÊj¹×f ãË_…)ëç7[%/»êcoï{ Þ£òxŸˆ3«Õ¾¥cNëv¨/„‰[ðK†­J©÷ÔË6öA°©¯%#u§1n'‚hî*~bw¼‰Íåÿ6Ê;|+ºG^†ÎëÃwñ¼ÕJ=·å´§!·áBc#…Ü<¶8Ú”Ö¾›hˆWÈ_©¶î—0·›|-Ûn7h+ám[/ÜEÞfW¾’Ó•¬=cyÙút7±/{^ûÙ'_†NçÙ!ç쥩‡ë‹e[º×‡>x‚Ø$–_3 ðÊöº°8©SŽN¡»ãC—òìÉz³vÁòmù ¤Ýæ€]kFWµP†±SVMŸç›Ãʺ“缦àÚ¦=×mõ®l»àu 
òß½oG“ÚD×+ÆHm;ˆ_Öí;AFĬ_„=„qŒ#ävßE.¾YdXW8qŒcãåv$òKî4³Úˆ £C2/è4Sí¬º³×Öió•úߦ =¥ùæÛo|zù™Ù’ÞQˆÃñ›â—]½Ï¤Õ{³%jÓßußnpåÑÛ¸losò†P­¬m^!i&›÷,è×¢@—f|(ê(#¿Ÿ-)Õî'š ¬Ç9Ö|Ô‹© ±Zú¼b:E'4öÿµ£¸cÈ6ÇÖ>ÕMõß0տ͘|²rÕEE•?£Ta~`V âôæëEÖÖHdxд5AWíªB¶’1ùI±v†Ç=™uM.ž>Nµ…“*Þ^ãH¾­){=t®n=ÕÄ7¤0tçS!ÍžÂUãîEºÙuΑ,Kš=Ûân!VþóUÓƒýã>ò…(³þ´ý†©òRî=À•;3e›7[ç÷ŸYüМ֎YX}U$¾/|þS'Ѡ먾Ï1¾Éçvõ]cÉWÿš™‰Î¬ûQ¸AWÚ «‰É¹¤¿ÜÉ‘7óÕQSÕù4BL×ÄÇn6®ª*a»Þu·Þ•‡¢ì9Ñ^ŸTQ˜yÌaÀ1ýà4æ9m¬^[¼wò§ó>êhÅ«ë•Á¶iÖËN>AÃ49â‚¥â¤m3úž½y%ë»Úúds4ãÍhµàp¬b]mʉ[ÑíµRöp5g¨}nE<ÔÅÜÖaðn‡¹é¬£Z‚-×Ôuܳ{s?¯e[ =gƒÞÅ«a…¬%‘*罕°Þ¦Ô%½åØuÉÝb‚ïmj詼gÓäÚø6Z¯¾Ü.äúNèÍvy¨¯»ÈR>ÐÊÿIø2™*N¿žmsì°*iùRcNj ½u¥Ùë¹(±xŸÇjŒ‡G‡ëÎ7”ö™øä¼½fÛ‘ï0+5…'°—YÌŽ,ÇrÐPqRs~“ýÃÍêíînaãë®N<{1/q¼\{JuÛv¨/„†ÎîuB.¹Oð 1“¦¤³öy¤¢£}FêyÜä“åÅ_ۼ莬”É–cóUÍŒªíÔyñÅùLÆ$"ë~­í¾»tìB+m"X‡)±ò„Õ”oº‰ÖÛ&ÌÍcšÊµw,~› ×lËz«÷(ïš”àuÉ}ìxyvÔ‚Vìà~lÕŽ¦ïScï>óæäµÛ1Çoi¼,«ÂîÊ®7Ú˜²oöÎ7‚ ú²“Ž-3ADŸ„ÎÕÄJ–r±ˆJPZ…6÷¾/>8WÜn­Õk»Êï6S†ë:‘6ÜPHMšƒj¿3t¯>¸²Ý«¶yݶîJ‰vÜ€]€Ò²R¥-¥¹/ž?éÁªb æ§Ì„“[ްÝ'ž[÷ÝÐ^ PÎb}å&³]ºïqlÇ|Û­SNyY½ÕÊånK§³¸98kÄÌýÉ©þk^žÕƵ5–ˆpʦ©ˆÀó-Òºç0w‹M‘§V°ëÀ5/§ ðâ„É,*k%ŠBT‘ªÊŠ3rég˜1¿)D¢‡V0KÈ”î°[Zž'Lmq¾2i½˜¬Q~¦Î®Çe¦³¦5÷R+Ù0ÀÍ SY £ÒŸò¦Ãa¾¼cª2^-Þl{Œ—R3A“ç®S ݘí©çè‰ z“-fÿ¥eaØ[]‘F!HÍq­û„io–Ž áßp¤o,s¹)'pFÒº!û“Û%ßqœ; ú­ƒê&¯i¯&¯LöUõx8zëN½5ÏͶ®ñèÅd›³°6ýœ!"Tú¹Ê€êã>†yÝïhíèCøÄ®­b•­ ^mƒÒgÞõ[¯0Ù·åXÆcyž³Mì%gžöé=ÚÉqC쎜²ÕæÖ_éóW/t:¿®Iø¦›âýy-{Aaâ¹[k°`³‡&íkО>¬:‰UÅí!T|l=·M`Š+ÞÏïçK%‘Hë‡ i|åÎn-sÑfkêŒìðȰ“V¿Ïg|ªÌ\¹’ôô£µd}“‹yll½ã}.nMöŒËcÚÒÞ­ë–}N[8é½Ç«¡böà‰:Ô+OžAÖ4´B0îr[¬fª?8ì—€ã¾D‰ZJHJ6rá膽&­=‘ï+5•Ê'÷æ¦È£¾F¨01îÞFêàu_ynd7kžÝ’C¯½ߎzs{ì€ÔÂDïú‚j1i4⛀ O?au?ZÝ”_Ér~›KoNô»gèrÓ^cÁŽúe>Ým摈 úa¼Û9Ë{9÷ %<{ËMdU|ô»µ˜pÆË6­·:h½‘¥×]WŸ«ã÷ŒeÜßëñZiÁ±úg-—ìßttEè¼>/Žy±çq±o½¥Äw?x°¢R³#2Cg›")qHîËOìŸTaŽ˜Ì´×!ÛëÚ¸Mž0r˜éäƒèÂà–ît„6h ŠnÉI K8¾zù{ÆÇO8ä£8‚d#© I Ä‚ÖöuÛ¸+÷7µãìÔÖÔ àÛÑ õîàMoꟻ3o¼áî]<“è>yhï}•&bK>dô¡@§ºE¢Åšó+î³› [Aouìªqu9(mneˆq¼xޓɇö¯à5{àÕ.æG(ìNk;Jx|‹E$tÀt Õ5µ‚0áZ³®¶–ÕáÁVW×¶¶,{5‚\â‡ìÜi¸â–6áÖô€…Z2šÇ–|˜*{T?*BQ5¾U(ey=v㽌”šý:ÜËõB˜ùy×.&_ åjݨ¸ÚÏ¿hÈ~ ±ªžZœzÍ`"ŠNÌIL »›´Ú¹”Q̬@8O­$%!—¨ÒØ~9‰ª›\‡‘Ôæ,¬–MžB‚‚5µ5PðTR/UEöSÆsË góEÍj«¢ŒÄ·´þºòõ M¿„¹s8,k¨B—ÕWÈê‘Unè.ÔžáT*÷s`a_P$â7¼’y]I†¨¨÷Ó@J¿Ô9+……±[/e{7TeŒ9Ù‚½¾­Â”àCž34¥¨dõóõŒó)m\Àm•(z_ÖV¼ˆHûÍÐ-u yôK]j”ÂØÓ¯e‰ßùï±Ò×å¨Y…¶g5°:»~ê(;Tµ\¿ƒå—ÄSíÎkO^6ráôut»yã‡Gèµç&j2¡Fûi;á­Xo¸ûk¯!á^ÿ­7|¶Â söX5k¸‚²õ’ËÇæïøgºãæ9§D%xÝ™¶•!Bxýs(»Ö•Ÿ‘ø_dÄåË[,×d׊]ÛLk’‘![#“þ ]³d†4J£Q£W= ONCJÚyÝöÞ•­Ý€]‚±¼òsÛþÔ6ý{lE“YðKïû˜ýµÑÕÀ|ÿ³;uµK¢àwÉ$ÞIåYršÐŽ1Mhm Z[Û!’m‚PUŠÍ}›%;~Œ™Q„Õƒ¸ºß ^°ÓvŠ=½ÑEìëì&~õdæÖµÊÊX8D°ÅKPää§:bÒÐy¶"Ƕ¯whЩ„òóú_?-“¶z®Éa?µÖ-\ã4HšF¤Ñ‰Ñ+²ÂS Ò6ÒÊ,¬M¨ªª!… à¨>.Ê»î¿*;+ëæb*îàäë õø†|”$/:Ð?•½Ëî¹gl5 ùç<æ¼9šÓiŠKž÷TN{θÑ:äGF{ÇýãŸên\×7¾8Ћ]ãDFL×§5ˆ“³çR< y—.º½Ã5®™*o)ïK"}ÒÜžq'‘„›UÊÞãëPÏ1¶æëHcm=Gpt,‹™“õ¶Š(¬¤$‡½˜ÙÏÞ²]ÍcŸœ1å=§‹÷Ú¹¤4£9cv…>2]Êí‰0ïäFfüDm©ýê]Çú_sÿòŒ»÷ÏšKó£ë"–·ד=˜Âñ6ÐÿëTÑôÆÏAÖ…"ÈÍûÕ¾¶cÞ~3GŠ.ñð |[ä¿ÑJm@8̉»·Å?uC}æy-rkÎsc»ÝàÙÍZ»=…¤=vhO!$ ± TVÙÖÛ¡,<÷ô>N3èÊlç kº&Þ÷Ú¸h†?÷&úqë!$£mg„/t¥1çÃ÷ñÄßbp«Ö>Ñr½}üÒ¸«ÇY½&4Œ|ò"¹-tç@ϾΩ.­Âýs5ò™—™1B꣭ˆ„ç$s¦Í@–Ýüà¿àD¡ºž¡éª =›öýØêJ‘“$ƽ­JìÁ<¼ù)šs<&-²;cÆ7H`Ò–3;qS]Ï!žFbžˆ×Ë{¼÷Ê…epË»îæ;WöãÅ%œ$¿­ä¥¯ÉU´\Ζ[«d½¥IÄL×Fü·ÉâëÓÙàuBз¶FNá'fÄ.k3¶@iGä Ñßa¥61ÝuºÄpÚ®¯r¿0ðÊòÈ¿©Ý¶Æä»¶sjövØi®6q”ÖdžÔVœâäQˆ®jÆæ<4ß ;)…S ¯|ëRMŠ:»!ë{n¢o¸V+ oõ¬|n¶_x2ˆ³¼e·,_cæq3–óYÁóèïŽU³væwT7Ï$h­ç–a¸-MR9´ˆ]1ÿêN÷Þãï9À9îŸÁ¸ê=O9}yû´q+‘ÄåtÜr·¨Lr[×më®DÚcºÎºR¯è×ZÍ®wì?)nCd›ùjc²äðÖ¦³›EQÚ ßí»ÍV>Ûo­»¿á3"ïÓNôóE†CjîǺ2®ÑË9 {…_8AÄtßÝ£óŒæE­5Ô]Ûô*L®ŸVN¯£‘£°e]˜zõaö>A_InK‚1 ÞJJËu”‘{ö2ÌË‹AÑZNà—ä*÷åêtêÊû»—×tàœ¹En.*/÷³–à•ÊÍe)) 5.ÌŸ8ª©ñ[‰Ü' ݹ×ÊÕ‰£úʸý'Ï¡›6°§HÝÞxŠTdÎ÷W¥f/jíï—=:l¸õW3W׿<¸Þè;UšsQ)m$<7ËF’=›‹s”ßÖÝS=õÎÄznÆyÖ{|úò”iãÎ"ñ'è¸nQ~Κì^%)¼Y[Ç¢cÛœ…µÑ}Ä*+eIJ©@@õ5BkÊvÝÞÊbÐ ­|H3ÞŒ¢››|†c6kÛÂÊ¢¥)šËPtY«q“÷ÍF};Iš¬ÌDW"ßaÊÂsèÂf<¢.õõƒûÜy;yó16ûåqCj#+}^E«f,ÐÒéõ t yÝêôkmGã;¬Ô(SýOõCo+ßGvhöBMåFï@ÑFª«›³É;Çi/’»ú[›š‹ÌwÜDß}­¯Û,Eì¢h`7’y 
Šþp¬š-ÛMå=ÂPo»Ó›8LEW4ühûº­ß•í4]gG‚&Fýøî„l¯f=è¹ÞOumWP$Õ%©ï–´âAdà†ÔwuFi‘L~¥¢8õø»©ß}zsŽU¯ßâÆyzL¸d‚Q]ƒü—Øeб¿ƒÌ[ëMð6çU?~&ˆäž=I/_V©©‘~8°roSWÏxd}ìÎ ­Iå~¦®®My‡†±ÕMR¹×ÊÙé:²>ÂMòøq§cy b&À=Ä‘'f˜/SlGž¯ŸRwèé­Ò¾V§RÁ)Ì…ºäŽé­úìùgUUåŸT°ÕTež>ûØDõ™zmB½šõNuŒtlêØÚ©-¡he“‰Ú´Ã£¸oN`ã§ÖpÞÜS˲Ñ/´É=Òæ,¬ <}ЍªJ €êëTTvÍhWuƒr–´AÛÐ+£-_°rÚ/»Ð7Di×HC—›¼²îÙù¼iQ9E6FŸlÑÖPODiÅí¬†nœß}{üÂkuñ,Úƒ†v¿œ§ZR}]¢¼ÔçÉ[½„×µ“uQ‹ŒZœ• ­b¾Ï{3pÎÌŒŠîõääç¡úï ‚7H wîS/«mΧ7¢¡œv§üd}ù P'DtcÈéœ3G•¦±ç9Ýõ¯Çp¶•r".)Yâœ^VV7ûÈÂb ={V0~œåOÊ{}=›Ýû¬F ÿªöÕ.@jÊ{‹a°F;€êë\Ð=b[ÿ´ß¹  úEÿé&åÌ`CzkÍR]ðBߥå‘èò–*bæÉ诚­áW^«‹gÐ,M }·ý[Q^J¦ŠüaI#©-H}·à[Ïzž|w€þ`<¾#‡ªÐukÓ‡Õ=ztŠ—µÙÖµ»¸] 1ä4f› )ñHî+&‚”ŸÜs—ãÁ+ùvûÕøâAºùÕŒ@l,kàÀ^‚ÒOJ UDJ]–˜Ä42†{#;[¸º¦¸Š!˜@õÝòå!(h`64ý΃FŽk ÌÏÙÏ3ÜV­ìðGŽÔ»~ýñìÙÓÅNˆøc/ýºÙÿ›‘ÊÍ9…:òK³\ôÞ½ç«V®þ©Y9z”­ÿî݃Qñx\·/׸ˆë¹ÖÖ£àP}@÷ÅÒÄpïÓŒ¼Ì§ò*šÝÜq×/Yÿ5®cú¸ôÓ±ÉHžœR®¯÷CkpG@œ»ðÚÀ7·dÑ‘®ñMêÀaJH|v¬ç­ÔU x303žÛ^ë@Sœ9—7vŒÉÏkèãB¡Ò‡×:w>sÚTz7/Õ·£ñ½{õPTЃg€ê€?//¯õë׃à;ø{Î,?¿]tiyаh·5³»Q½ú÷ÕøIáO²tç.ïÞ½ˆ’’ß½ ÕyÿOí³)æ–”áþ¹UĞͥŠÛŽWÙxš\ÞÚÜM~~íhêµü4ÃgÚî{z„{õáO²UCB^ 1|=* ßCBL[gø/ÈÐÁƒ'çæúß¹SnlLí¶¥úÕ+ú“ŒLGGwxʨ> »C&‘-Y²oï^Ëé …(Ýq(TæƒØÚšê 6cÞ%ð‚„¥ŽËwùû-Z$I£}ç[›¢e‰¢­M‚B3€¢8»(¦7<䟈²ÉÏfYE¡[·z–¬sLÚêwûÓ_K_쇺zÆÆŠ?•O™êô˲uÊçààíDbù AÝQø½}-~åÊ3çe«@õ†¸˜èü¿=zxÄL{‘Ô­Òþ6-©„ññïÙ3ö…HdÑÅ‹ì÷ï?à´T’JÅwBS$%¾Äìò<öÙsÍ¡ÆGÇÆŒ !߯úîݧdgÎëú‹S4wîÊ @o2Y@[›Ü­JuaäÉÓÎË\ð‚P}ÀENFréÒ¥†V“èRrÝ$Õ)Q—ÄÅé¿@òq£Éºº®Ú°}üxš²²Pç³iNè¿sB›=ôý’ïÒEXùë%{óç÷ä½ýìØ±oÜx1Õ>f©þô¡ÇCésfO’“ׂG€ê€6¢7@·¯ÆCG${©h6ý“’&PU~ãl¨±É çe¿7&j¦ËU‡ì©ýÃ`–ÓÉÔדpqYÛybEî±rå?·oº™ó÷ßBBN£Š :U,ˆÏ_½z<¾T´2‰´ÌqqAч£‡÷h˜+hôëê)ÂU³.òÓ쯷j…KgyyH ¬ú\R°sçccªA—ïðYSMÚ¼%[G§ÖÉÉ£sÆpèÐYf5§ÎÔÔTÌšEëú>üü H$ÂüyN$r÷]£@õÀ÷##ÙÃ}µÛ'FIPP€jÿÁ*ýwI½WU~ûòi)Y¹µk×â;ßü¢b2®®ÿ0Ë‹ýüöêêR-,ºdݽ²B44ô5]‚ºníÚN>‰½3œÑššS§**™³fÒ»b»Š ]ºTöþ}™½Ýbª°<¬Tüâ41w·UŸKK††Id½ácˆB]cüÒüWñ7# ŒM—/uìäQ¥Pé..k++>9„ÃUOž,&,Ü5Æû½z%qéòƒÁ4;{·®ô-€£ý°[·Ž=|˜=aMII¨KÄüC‘Ä™³/deécÇØ ˆð€P}ÐaˆŠˆ,[â€í¤<ο¦ÖRÿÁxΨLjÊ>&D†‘)”©“&šêu%)B"‹ÎŸ¿’-¥2/î‹Ò(lnFÆãq0ªŸ>ˆ_¾šƒÃ Lš4~åŠI]·`6{Ø0äsIÁáÃÇðøš¿ÆŠÐÅ;cµª²BôÆO¹¯‹§L6[²d¨>øyèõÓÂ6lçÁ£ô7#Å%¥µMF©"¿7V8äcè(Ù^½ÇZ[ésj×¥ŠáªU†ØÎË— áá·%zÆØÓh¿ýuþL4,ü¹œ¼˜Õ(ãùó'ÿ95Ädþþ›­·ÅoOž<_PPjc#¦¦öÛ[ÿpùùW¯¾ÀámlŒÇ‡É9TüZöׯ6lçsiéíøøŒGÉ’²½ûèÒ¥dQ¸š•ÿ*íñƒ$aQQccc3ý~CëþaFVUâì<Û©`ÇD_OºÿR^ŽljFéÝ‹ðk"Àb‘Ò ÄÇç’ÉDc“Ašê¦ê®òÚß4zOnÏO´¦æÉ“¸;ñ÷˜U¦¦4ð+]kk‰ÏŸ“ããr+*XFCúè?ÜÁa" ‹#*BÕÕÑÄ60ÅσDUU‚m`Š_†ŒŒ:¶Õ€ê@õ€ê@õ úàG¨AÑÙyÏ_¾*È[\˜¹‹‰‹Š÷ ‹ˆQEÅ„H"‰D Qp8AÁZŒššÚêê/•¬/ÌR³ôsyIñçâ•e¥"ô’²²}”•u4úI$°mhuaÑýÌW9¹Ÿòò+kª‘=¥¥¢4¼8O¥ PȘñ€«©AjjÑêj´¼¬¶¢²–Q\SRR[ü©ú}! 
Ûé!!(ßSTY 3óYL ª~_ E7mÚ„ýßÄÝËË‹ÿ§œœ\¿~ý ÀbðËxSÄH¸›”ýø<ËÍ‹Ž.z÷ZQ³¿JÿÁêƒÍ°í'¾ðLLb[½KÄÝÇ/&V2Ëõ Œ,ŒáÿDqòñCÒíèØW¯˜ººTccªÅPA‹¡Ò?ïrx<®¯&Ûê2_¾Êˆ++.®12RøaÔÔÔz÷îýþý{èÛ BiÅ—ƒ*hô1ÝþL`]£^šýwøíee­û›Öú«e•Þ«ªFrq‘þ#nÿ·íOºDÞÛÿnFe-_.ÙmܳaÔ(úÑ`ÿ VÂMÐyTóEÒíØ„Ç/ H%‚ÐhZƒ‡[,Gk] Æý5ôÁ[D?¶ø)í»ƒ)öÖ÷|ˆè®‰OÙlÜž0ûit:d Ðy(¯¨Š¼r~Ä »nn‡žý‹Þ½¹y;nÄPÓ­);vü޽]n2`²%´´9Ù¬[·N 6n=€ß¬ú^„.U· àîëŽv˜2RS}ŒÜعc<¹Ž¦koÅnÚém L´=¯Uü`8 JÈŸ“¬WDÕEº®wRv i÷I$ÎÿÂU?teúÊ{å‹Ë«¨4˜í¹ns§@gáÌ¥+šM„þ õâ¾›~æVGv ÔéàÙ€¯ß8l` L‡Ž9ˆÍʶ_éé¾¥Ñ{‚5~›ê w×±ñMÇv’+õH|G<|÷#HÁ ÙYyÑG/=7¯Î/ÈÎÀ!œ?ˆþ¶Oïçe>ÚMàŒùD¹”YNÝ\ïevàÃÀ¿ÞØ=ŸUç4Äýy´·»«±Ÿ™¸kâv:œ¼Ø+<·Î‡ŠÛÉ«¾Ó¹ +{›‰{Æ!ëb>x™IÔyàˆ9ºàýè&[8ŽÉ–tögëÝCQÉ;¸|ÿ ¾èª, <éoßÒºÞ̽ÖT§dÝ¥pRµg}:{.Jy±ã“ÿ,KKõ^ÝN¼ôч§LÞŬš>Ï?6‡ÿBëNžóš> Q°aqR§B7Xfw|èRãö èåÛÑ“ÌØÏá´¯¶¯ÕÒdJJù]NÃlѽÀy3…g5„¢<ÊýDˆ·g6^ ñ ù+ÕÖ·›|-%¾YŒ½‹Y»`ù¶ˆ‡ü±µÛ°kÍèª;k›ÍŽvZÉ-ôÀ»9v¡˜ƒ¬Çiï-œ+¤î4ÆíDÍ]ÅOœÛ­¢Ê®úØÛû¨·„òxŸˆ3«9¥aÜßD¼Ñò*L_Wß«±cqÀpv3Ýt/vm3†•{mÆ0¾"­0eý|þ¨5:½þÖHÜ‚_2lUJ½¯^¶±‚M¥¾!àGxõæ}aÞkÝ¡6` ¼^ÛÀäZÄ™S'v`°eŸ_†½³Ygô§í×£ìWZp¬½H©NÛfª’äÙ*´®_âv!×wBoöù5iªý6ÎнúàvÊv‹†`mLÙi áHÙ‚ ú²“Ž-3ADŸ„ÎÕlG HÎÙJS³EÔÍw–2l—ˆµJVÜ÷haY‚P ½u¥=²ejbñ>Žq×o(í31ðÉy{M^%¬·M˜›Ç4•#_ɱ”³ˆET‚Ò*ì´¹™S|p®¸ÝZ«×v•ßm&;Úo¥msì01\¾Ô˜sÞÖI>#õ JŸy×o½ÂÄUÜ–cŒåyÎ6±—œå¸»½G;™"®qˆÝ‘S¶Ú\AŸ¿z¡ÓùuMÂ7ÝïÏkÙC ÏÝZƒ›}84iÏXƒöTvYuUs^司ØznsÓ=V¼Ÿß-NmžH¤õC4 ¾rg·–¹hKaÃÃŒû™–0rá$;eÑõƒ³¸¡ôŸ³ÓÔKOsÚ‚¢[r’ÂÂޝ^þžññSù(Ž ÙHjB± ñ[ŸFDfèlS$%É}ù©}ŸÜs˜k(®$ÀP´Ú\O¢›üÃË£ûžœ–1«™êû%à8™L$j)!)ÙÈ…£WöuWGÌ£Š~ÿ˜7ª0§},Ó^‡l¯kã6yÂÈa¦“¢ ƒ[ÊŽo±Òš†öOn-I¯¶0­/«kŸ‹[¤=ã’C¯½ß`‡ï£>/îyÔ5>c…6îÞFêàu­žh›è]ûXLø& ÈÓO˜Ü¥µ¿ðƒ (úöå3Ë™-y¨|›”˜Rˆ4^bL•–QPVèýmQ–>Ü`fµ At=»£Gc!·; ÑpŽ:éÞBWm\#?Nû‰6Á àå•Õ“ 56è¨0Ó3ŠçÍ•ø‘˜9™‘÷ØŸÁLttå¾>zíC¢úp½.£-µ´èÅ™˜L†Ûàw¨>R/UEÉA<·œp6_Ô¬¶*ÊH|Kë¯+_ß"ÔôCâüýXÖP-/C¾j—U¦7­Ç·.+¨Ü÷|a_P$â7Ìþ\Éý3DE½ŸRú¥ÎY),ŒÝz)Û»Õ ¾Ö}[Zn)ÉÇÈÐ#Û™ôOèš%3¤éT½êaxrRÒV°ßH³¢‰DôkG-u yôK]¢•ÂØ³³e‰|Æÿ¡¥sñúçPöuó3ÿ‹Œ¸|y‹åšìzõ»Öø‡¬ô³ p XTø©Áí{k“¤f3m³­ ™[£i˜íÈ_ø>22s("bB¤?¤½º¼Åes¦µÆØñnŽJFjè‰'¼ã /ìVh_‘à™ðÌ¥š@á~T$pÆÍµþßO“Ó*2½rr2‘R}EE)xD\â‡^Ói'®LXóŒ»ozKàÔMŽN[óÌl{R}**”÷ߘ˜À]ð[T¢¼ëîñ«²³²n.¦âH¾¾PïÕR’¼|è@ÿTö.»›±Õ$äŸóH˜óæhn‡CyÞS9ãFëP™<#îÿTwc]^ÊŠ½ÜØGL×§5H¸³çR< y—.º½£¾µ¿V]ÊâîÐ'ÍáwI¸Y¥ì=¾®ùck¾Ž4ÖÖsGDz˜9Yo«ˆÂJJrß»œPñ­ðDŽÚ¹w®® "ãÑnÿ4öž(©ã¡f„ÄÝmd(,9|K4½ñsu¡ró~µ¯íØÅå7sq¤èJ ’›Y”ý¶˜%,©$Go—^`UÇP«CᛇÎÑ2œãâŶå£MÄëâÂãkiM³ãû­DbWúÊ*K¿=kbv…>2]ÚŸ—ÆûÜ3ÞZ›óíÓz÷lBÑjkÞÀ¾Â}+Öµ/亼ØâŸº¡¾Ðæy-rû; íü€äm~˜D;ºØ©Mtݸ¢áCݶƒHñeݾ3ä`DÌúEfìv¹ r»ï"ß,2¬kÔÂ1Ž—Û‘Ès,}¸ÓÌj#‚Œɼ ÓÌT)UwöÚ:m¾RÿÛt¡§4®ápãÓËÏÌ–ôŽBŽß¿ìê}&­ÞÛ˜-Q›þ®k<Ä•Go_ಽ!ÌÉBµ²¶y…¤™lNܳ _KÉí!‘œØQF.x—)%Ý1ËÓY;X#‘áAÓÖ]µ« ý?{gU÷Æñ3Æ 3Ĩײ¦BÙzQB‹ÊÒBŠTT/Ò¢”JÑF‹¢•êýG½½E{i§…KJ…Š6²U–-ëXî6kb¥<ßÏÔ\çž{î9Ï9gîùݳ ï М¸(GÝ“­¦ÜkŒº|væÔô‹*ïðhžC£f<òlXlÒ<¯XvTÔc;}‘&_tÄå„£ÿË–ŠØå°ý>‡?žIÉúOùåP~•êcÍÜ{y~•ªÕnG-‘¯Wòvô9¸Tñ°º€¥y*xíƒÛÝÊÏœ1Á«èÝ;T*³w©¼¥´`6×[uŠ0½}®©mÌyM®•ÈÙ·™=.~Ê.‰­n­Àüºvû—âVT•Ea™ñf¢ô@–…úٟʤ©«¿ÚB×&QÞ~gd™Kz<ö–£¯;Ò¼Ôd)#¶«k;Žm‹D ¸ì÷óPw‰Þ kˆ7ãì•]ÖSW¡GËpËÝ"ÒH_Ûê^l^èãq¶T×z_KC)XÚé£ ºš¡Ç?4û¨c7ÂùÀÜÖi¿ÏÔ@¦99­ò¨Øo‚<}•÷ZèùÛ„Sç \ï±m,n[›3 ‡Ò ùUvlÿ^+‰ ¡gtânFÖ»çcÞï~ÅJ#zî§Ž[Ö:ë#s\ÃÅ(z®‡-78†°bÈŠ=³L½º‘T²FŸÒº0´¶›~;yÑüꊾ*KíU 6Âl¿ðc”••qóôëÜß—¬”·¯„k©µœDTY”{:`Ûtw‹£s›†4³Û ]Q[û•Gæ.1å“´Cß»gœò’zßÛdÅÃo]^Ëœô0{ü¬Cñ‰~ŒÚùWf«Ù\_g„x¶X Fo·‰i\ ¹ÞrûÌJº©O ˜¡îõùb«#½Aäå¯*/ë.#—–~áçïžßËÅ4#Ó'îÕwq=q˜x"-¹Öº½é襞ên^Ï’™˜üÚR…DϘÊÔúJAæJ1×FzÉQ©•t1çuWMJ® dú¹¼ÆÇÂ7Æ@”È ¶àÎ%Q£›4wÛÛ‚˜S4*‚]vÚ9nós•\k«òcí~~TQÑ€Õ×ãðx¨‰¿Bõ1P™± Ãv±áQÞ3óüöiúª*ØÖÖn{çÞs=˜o…L² Öu7)Ÿ ̧µ“ȨUiXG›Ã~ÏĶ ì4Qdµöãf…­lþ#ñk»µº;Ô&c*NÞ[ùÖÙ6)Ÿ¸Bh_Ê&Z¬þ5©%Aï—ýhu†”{hÇ—|ßg%$i¼Ãvw)rí°¯Ë”Ã¬Ã=ª[eÐWa’­ö¾°ÚûÝ—·ãX[˜ÎFþÀ÷ÓPWÇÉÅÆˆÉÜ#‡·|hò÷9íÖæÑ¥sÖN3àG@_âÎÐ%2þ7À¹±G‘8z]ÈÊ'ô®ÂÐ\µzjã¤\1C]ôêÊI/b„yéãIòï¡•¬¾+¼’Ó¥,âxÿ¤NâC p5Ô×w—‘këêˆÄîÚ£¯!îÑ#÷ìÑ]yW•p÷pò•V}~%QÑtɇ”£ž³$ýçi Þ͈|šŠ‹Úüøå¹ÆÁ.êQ±SšüŒž¦Œ|ÓÊ+ªÀ!þòž7ÎÚÔÔ½{_3 C@+ 4½xõ¡•Š>×&†@@5ÔjnÔD€_¦ú~-ÕŸl>2…å©Q&Á=ÖÛµ>3#êÁšîÕ‘Z¤Ñ ù’[y/½^Áî%ÎSU×ëeg¾~×=Fþ}ü7„H¬¡²Ñm¬´eç‰#<™RíɃ)›]u ½y ÓX‰œß5”‘uQE× 
¨…ù•Ð`CbÔSkðœ„î22‘H(-mèÞŒÓqu-Ö»% {ÑQÕ9êìæü_ǶíLm/³P^^Û4v€J3@êðA¬/2h¨*oÒÀB¡aôéŽýq?œ\m-ÉЗUŸ€{TW;šú,dÛ ¶A=ynÃx¬÷.¢ã™ÜQ_3ä/t…Ÿ’‘‘Ö¹?^®¶sm92/lßÌ8’#3SJ„‡&ü=|4ÓKÉ¿ÝìMŽë§ §…P<Š>}á…fãt»‚{>»`n]?Åáa¾Ñ´ÌtLtb'Ìêòbž~|ÝöH£ef~êþ¼Ó™ˆU*˜‘w{l n‘‡”‘ª–èZz¶Ü;%Þg`£sÑvëëôïiªCIBŸÇrîp˜§(úu­‚ùTÉFÕZòÖnLiŠ®ûxâÆ¿¨ˆ‹ÆvôiÕü d¥ÇvàZ“KÿJp)ÑÎÒD£õÞ0“>/£Øøìò7Xõæ©Æ¡¦óò̯ÈûoêŽDuÌÞ£ræ´¾ FÇ^~ C€ôÓÿßÃcóFÌ‹X¯«±¾í]*™~:¸¼‘V޼Faj6…Ùœ…–,ŠÏìÄ,¥ŸrDźmGu)IõÐÏ~0j }Øjyuë6’b(¶ç ‰«3Cõ•W×1š²°-ž¼|wá|[y·Ý½6ÈU޵Ž)o[Ï+nå¨äà˜¯4@×ð¢¹TÛ þÞþö²?Üèøð“à‡:ªèYd¥$êêËKŠx)‚ízÐt}’èÊVPÜ'æ.îÀC¿áž‰¹ž¬?0ŠÓíÊ–»d­NæZ}÷åí9RßÄMõpŸ>|±ÅˆÆeEk¢–É^¢}ÿe¨ÚÁ½²³Ò‡ Ö]Fæ@&ásr¨’’ßß?¦ïµój÷ Ï’KÚ:ŠzFx~;4о†Y0­Ö£ˆÁÄ4lb•½wïJ䡀ꀮñ¿ÿýoÒ¤Iýû÷Sû( òþÍ åÆ-pþ ˆR¢eÓ½·#ÄŠÚĨ G|{'µêSöûá¶³º1*C‡J%&üˆêûè¯ãzý:oÊäq` P}Ð5jjjH$غÄÄ1†~ûö ÑÖÇqüi“¬¸•ìsí»zÕ»OT5´ðݺÀÈ3}wúŒŸÐ¯ûóü½y[¯¡!I Â/6¨>Õ=O?ò5­—q‘ª#Æ‚5µüí‹„5n«»ù MàÖ×—»yóÓ”)0“ Q©¼11©«V®S€ê€.S]]ÍÍÍ v€®b9i¢ïî½ÒŠÊñ>nЇ·¯71åäìþGª¾¾Ý?ÿxgdÔÈÉqõq#‡„äONä‚=@õ@×%­¡Ö½ƒ² ï`7ÇöèÿN´[‚ç軕(=!Z@PX[C­‡ÂŸk·`ÿƒ+\Ÿúð8ψDb¦–T:P}Ðejjj¸¸¸Àð}ˆ‹ O›a}ådà„Ù p}Rø}|ù¤¤°ào»Ù=w ^áyv3ýýÎ._.F àú ‘㟒RßÌ›ë 5T|UUU0¼~„Á ²Æ&w/geß×Ò^ð.éCÆ»E÷¸º–deevð`˜‹‹×·zü^½âÏZàäu T|'0©~­¡C„øùNó3²v$’È}$ÕoÝ©©®þ ’‰ŒŒ–­­ OТŢü|}¥[õî]ÎOŸ>äÕ?ôõ@· ;@ÊyÉÒƒÿÔ5™. *ñg'CÑWO)(*M4Ÿô3ï+$$çºbÕƒ{,,(}`q©S¥ââ"66CýÕ?Duu5‘; @7À×Ç}Û™‹W߯ÇêšXþ±?î…^Z°p¡åì¦@äâquÝ~,22wî\?u´gN¶ppp²Ã‚yB‚ f€ê€¥ªª 6ë€ndæ´)ïsrO߯=Á\H¢ÿŸ”4¬þAè9I)i÷µk~mLŒÆÏ+.z¿k÷qssþŠÔh ã>}ºHL¬rÍÚP›@õ@÷óú Û )á¾Æíñ³7Žï5eV?¡ß^ïáÐíÓ‡yxyæÚ„Þ%Á«WmHMõñ‰°³’ üþäðç ÍŸ·”@„—q ú û¨©©áåå;@·£­>ŒöyòìÅ•€ ,lÓÉ~8¬>ñ^Ø—’bçÅ ¹{ßhðG®Y32--ÎËëæÜy‚²2¿åd? #^½Z•ýùïùK¸I|Pw@õ@7S]]-,, v€â/õa´ÏË”wWŽù®'¯ªõÛH‘Êâè›×(‚Bs¬¦qröêg¥‚‚ΦM:>¼Ø¹ëʈýFéý6˨–– ^Éäááš>͉“S¬@õ@©>˜×=Š’¢ÊÚ5ÔÚÚ«¡22´'šSDÄ{gTq µ¯Ýÿ˜‘6múôe‹~##÷ï?lõªaõuµ7o'¿Ê¶²п—ê¨ú:Ò»5ÉÉyÓ,‡Ûÿ=*¨>èqÕÇÅÅv€Ÿ‘@°±4§””•‡Ý¾™•–:ä/}YeõÞñ[PöèÎò/eŒm¦Mù}Œç$›üml‚ª*‹ÃB/&%gRtu{Åìå/_„®_ÿð)¯ÜØÄpÂý  N€ê€Ÿ¥ú`5øÉPøxgM· Ô××'$¿~û ººJIM[zð0<ÇÏÛ¼îKaÒã¹ï3‡ S75RÝ~ÞŸddYÀÔÌÞÔŒ~üæmt샸Ғª#)ýEÄãq?-E…Â÷#sRR‹ÔÔú¡kcc åTüljjj`„'ü*ðxü_jª´óÏìܼ¸Ç )o^¸¸û+–’“ì¶-骾ädf¥¦äe¿“쯦>LCUyøÀ™}Á΃éÓ>ÌãÏŸ3c’^dÑ´Ÿ²*ÿ@ü\w¹¢œ÷]ZÃëן32J$$ûi¨«ª(L³„7k úT ¤$ħM1E´O#E¥e©ïÞ}øø!?ïSiq!†02O?^> ‘Dâââæâ&ãð8NøC%'''S úद¦ð@õÀKUU ï@õÀ Lê€^K=†¥½Ï}“úîSö‡ÏŸòêxùù„„I¼|<|.n‰›ÈEÂápxN†5Ô×ÕÑþQ««©5UU_J*¿”U”–”V•—ñ ‰ˆK)(Ȥ@†Õ›Z‚Õ&¤¥%e¾/ÊùX]Cm&ˆŠ(¼€žDâ “9ðxĉÇÑr¤¡ÕÕa• UÕ %Åõ¥¥ ÅÅuŸòkKK…8ûKñÉÉË+Èâ&ñi@õÀ¯!<<<66ökw//¯¦cAAÁ¢¢¢õë×Ãd?øÉä~.}÷8ýu2ž@”(=P¹Ÿ€ðÕ¿hŸ.„ÂßÑÉWŸk?e½ÎI{›ÿ1CTr€†¦Æða}k “ÊŠwOŸF<^ˆ8ÐÁ¤¡ªÜbâa¤£Ë߉íZ ØÁÉõõ§_¿ª~õº:5µZZ𬡡®KKâïܽ÷öm…šYO×Ào`(Ús·ÃãqC”I´O£CFzú?QÑå……uº:Šº:–œ"”|P}ЃX[[?~ü[g¥¥¥ååå%$$ÀPÐsÔcØýGÏžÆÜWÕ7Ühê/ŒŒ¸Œ"íÃR€ž'ÇÞ—–™l:QˆÂÿ{[«{ý&äÖ­‚© ß´i|ý²Q—òò\´ãðËëWÿ»y«L\¼ŸñDKÁþP@õ@÷#+++%%•ýõ)%%¥ÚÚÚQ£F• ‡xžšuûú%!qi5Cã‰rƒ{[ô¤•iÚÁ‹÷9–U4m’1@ø½Œœÿ)òÜùh11´i”!CÄz[ôšº?å]Ý·¯HNVÈÔt>“-@õ@·2wî\ooï6ŽYYY+W®û@·SW¿–™¦kl9ÞÆ©÷GX@Tr¢íÚÁÕ»SŸ?±™m##%ÙÛ#ÕFFJ|V2ËFÐÙY´÷YLœ°|9]”ÆÇºs÷Ël )é¡PY@õ@÷@ ´µµ?~Üä‚Çã«««gΜùÛ½Ô€^Nu]ÃÑãAô'ZÑûÛÅ_Aí/Ú'éÝ«cGÏûÛQ¦¯Ô~Xù™³ÿTTÖͶ4-öÛYK‹‡öII¹çåuqþÜ)d5 â€ê€nÀÄÄäùóç555Ì?åääÄÅÅeeeÁ2Ð]ÔcXð¹KeŸ õ¦Øàó%R˜#?ß|H?y2höl»^¤ý°ºððƒéåóç ‰¿µ‘•”¸7m’Èzã³ãÚ¬ÙæýûƒJª~”3fœ8q‚v0pàÀªªªqãÆM »¸—sô4;"‰üÇ$J¬¿¼é<—øäø‹—..Z¸€›ø‹¢L}{þÊõ×NN"ã'ˆü1F–Àµf­ø³÷.^¼æ´`)ìûª~ ‰ÒÒÒÌÌLWWW0t •Õµ‡—Uœ0{á™@yU­þJ*þþF3ÒÖø5ýQX}ùñàýÒÒÄU+ÅÿH#«#© á>à¯7BCSË ª¨>ø~æÏŸïíímmmÍÍÍ Ö€çuVÎ¥SAc¬íIdÞ?8™"÷„9‹Þý°|…XŸ2²œ×üyB{öî^ù€ê~ ±ñio^XØöÁ´S„Ä´'˜ïó;àº|)gO.Ušòö|LLÖ’%ݰ_efÚíÇå´™QC5$9¾>{ýq‰â qš½e˜®€ 'Møí?à¿bù*N ÈÕü\ž¥d$ÄFM˜³°ÏZ@H¢¿êÀ£Ç–,詮΂‚—¯¾^µ²{ö^O:uÕbÝæñ‚³Û¬„Ûœµ^÷Æ`ãZ¶U_]°å»‹ò%kÇñc=d1q‚¹9åð¿E‹Ö@¥赪¯2%î^Tì‹Ô¼T…¢¢=ÎhŒ¶$Œ|ÅÛ5=ž!u1 [õJž¬ÐöFH+ªø©>ŒÓ+2ÌÑG¨¢Ö†…œ5œ>¯ÛARaHavVhø]³ñ=°=V|w®­GwîÈgêdŠn‡Z¯ ¼æH ÞÁøÔ‚GQóf Ëhv‘71;u|ªŽV}IÀà&Ã-݈²€öåýÈß]‡«'Œ

ëQ#O˜ÀµwÏG 4Q(r½Eõ…­jæ“L;8_µD³å@|wŸCåR˜}ÿØå·ºæƒH4לǎ:Na-ƒP³»töólåsu4bGÄŠr#«­M^æ< ˜òÁNorHz£ÓȵoïoW¢?‹÷ºF#·³a¤ã‹¼Â²}(¸¾æ3sã¸x» G4ÚYèe Ôèu!Íq~¯À¨m Çx#úšiÞKÝuùÊ>²ÜÞÑïV‹è*, 8í·à¯oØ£ò )ó ´ñrw ©GS:¥&¤ì.ò›mä~¹É«Û©TEVû=7rõÌy~Q™-o´ñô¯™ê­‚ =³Ä9øE³eüc‚—ê±# Ô‹vÆ–ÍfætÖGÕÇdiš‘PqNƒÑ1[ð8`žÍ°ôæPä'®=´]‡1숕G#½‚¦$Ú­¥'Ç-¼ÐÇHè;DKô?+ç-Ùßâ>HÞÂó¿›4غ¾ã²Äòè8שeZ÷ß=è<†Ð¹ÅXZ4÷èáö 4d_ñ+Jgöa¶r³ãŒ(y°¾M‰?Kú¿S圯¦ïŸÌ²ïk¢² =ØÓ©Á3Y%-󼽜ÕQ$敟·Q¤âæl½°Î˜Ü"žnÁ‡smƒiô«–~ž;ž¼nÓKµ\ŸIâðkû‡‘]Pü>õ•é<0 Žc˜®áõ°[sgYuc°U•?)\»¶'Vp©Cˆ{EtÀÈ={tWÞU%Ü=œ|ÀA¥•¶,‰‹¢K>„L­EŸþËp$9UdQB& 9þ¢ÄÁˆR[ÇòMEˆñ›“ú¬¸¾±¿¯Ž(Ð__³Û”ðø "·Ã¯Íž½Š@ïP}e‘> ÉGkíµ–|ˆÏJÃf5þ‘ï©!æEkFK®HNÝ£ÂlW¾;¦?p¾Åà ýu1Q[õ¨ÔRºëõF¼û²1L’vüþ´‚̬Nê'œ´Ž'U^P¥=mŠNWwÙ1HRürMS'‡¯µé²Ó¯°PºÌ«Íºh$kék£|>üjú¿ôFpÇÃ)z[±:33ΑaÈ0ªâ¾>SüÜ[/:vSs/Pc Åà†vNÚþNs“kµ÷Æ´†JŸ<6››zGæ`¡´¸œsÒ²<¤I>$g¢»DnN¾³rKÑUhãØEäv1ËÇbýúú$§Ã6Ûh\{z/a×èæ`Íôéi bHÙ¼‹Z–'–B|¯‚ça#Æ?MÓ†çºÑ[ç™7ÖË™0Ûñùå´g99»†]êH/|Tü?†qKž§1_WlÇ´€W! †°ò(v“]ìܨìJ}IÒwFM5tmÆ‹í² §—'ìUm=5/] Íj*Òñå—%šVZFze¼Ù(ËxApÄNÍqéØÃKg$Ôž“KìÄbñŽ šîáú[G­gêüÎíÃFÚ;ÏM6nôU‰Í»yÊ2ý€×嵓ͥèö\¹›y¿«wo˜ù?%úá›íŒàvb º·Ç(ÞŽÇ^ÎeF»òœ“®õz“ë #Šï£°âékëH“£Kõf.n™’Ì«ÎrS¢ß¦?ø"c( vhBzˆú‹G‘%eå¾nëó~w}øpžîÛÙW×b½[ºU£În^ÁßN§¢ªŠØ`Ɇ*k»¹–ͤ}ø‰í„Ws6Âç&ÎËè÷£=?ÔÇhiªw×”UU\øíòÏŸ3…„d¡ÈôÕGd ô¢ôëü§¾äA ½™Nk¤¾d5ÓiçݼûŽ&®¢·xé©'År6‹ºì"É<`ì¬\£‘ãgìT™C`þçmÂ×ßãÇêÙC™i›q48nÿdvDµTÖ]üÐ[·'61ûRfhÖÆìÝ{—~ˆ‰”a%!tüêÃo®.MÓ î̸Lp˜i—,¼yd63"d5Û=ú.´D±4ç_Û0l[f\hhìÉ5Ë?•|.ªÂ¡Ï‚e Äظ4šÒ"ئ4"ñ1sôQB4ÊJ-b/âÓû2 Å”|4dM¶Çp ŒÚÀÊ£'ÌÞ-“YƒžÝËxô‰D9”.»ZÒ¬jhÂàØ̯ i*÷’Œ°Û¡§=×|Ê))ª®B•…ŒSñq¯?›ŠýhYŠóg¤Å,*”)ùè7uzïÔ fg•‰j*ûöaß$ä&[7jSbŧnaäõÐuÇ5óý“Kžt¾ñ^ÎþÄa­óF‹m9¶2xžbÊ ¯@šÿñÿÛdÄág°Y%Ý„V¯ÓätäÙ›ïÆÍdeME·ìd}z™Ö¶„„4ìOÄ4làÏð÷©¯õ-¾¹GyõǸG ù¨µ, ñˆ‰ËÈË îÒ½¾<ó40ñ¥•©ß<Ф”ü7^Ò? v‰8½öã«q­ü´º¼‡Ç¢Pô$>qüýî ðåËÏÓ§ ôtnRt&b• fäÁƒÓZ¸kiØ¢ëÁE‹¬z2¶**ÏŸÇŽ ª 7¨>îþi?È™ÈcÛ)Ã…í¶ü ^>úHQÓjêjûr”ÀË!ò¹œÚäVÞ(½š‘o~"RÙˆó±ŸOm7± S&ª™_# Œ¾Ô4:Ë…†Ò{/%tTõWqT–ýÖÀ¸#tÝÑ,7¯[l#&ÀC¡ðÜ_ý,,> •vlWi·Ã“Hlg†ÆP•Aƒ¥°šÆDË¥¿ñ%J[Ÿø#%§:Ò4:Šv 3ãø¿6CDyHž²›/Cn$t!”Ë‘‹U–~Üì]±OWŒÐyAîÒÆ­Ýmê52ì€×MºoVºÑ\ÜWÏ'#oºøÊ‰ù»7Ì\²Õö B Ç÷;’QÙ7nHàgLÛûø®¡Gí¦'0s*ñß9^æã``çŸÊ»÷¹x‘§ß7É»+ÛVl i­IŽ#XR«º$1øÔ+Öy‡Ëyþ2ìÍë§îûfEÌ|‹H`lÖÝñ …–~Ú\Þ£ˆõ—Í|›ŒP÷¨¾Ò’×ÕÕ Ý=£ZC…T^ÝzŸw’b(¶ç ‰«3Cõ•W3ÆmrJaÛuìv8ûïÇù·Åd–©¾ ãBQU–•n&B_ÍÅ%bï¾q=khy¨ƒ½Cõ!ù}O^“˜¾ˆwäpüMÍ/wKã—î—H?ôŽ,t×3±DBP¨ËÖû̇ ²·[1zí¦%³%ç¾Eô¿Äµz¬„xÑ[½hüL-J³„;!Á]—uë‚{»›zÛZPÞø"SÀr®GôiN•ßnÞØ ‰Ê"í 7rO¶óÏᵕ™é©D^99Éï}bß £ký-/°†¢’çþf~Iô#>îî D`øØ(úa+CÑ’£ÝÜkJÑ4·Eƒ Rçc7¹Y íµè6ßbwwò7$weAÆÇâZ^9I¶ÌP™ñæ#ýÛ0êÕ9}VË¡ø ãŠöÄ9E·ó²Df¥%ÒeëÍ„]ÆL9Wÿ–šúMù„–<}Άى\dFÃè ûöéºr£¦K3Üß#6a§DolNÌDû±qûø0÷CƒH‡hh,9`7O+ SæNñˆ¾½Á%Ì©y¡ªbÈõúùš¡ðßɾ*^ÙÉ%á‡öãCn.EH¤sJÓ\7¯l–†¾GPñ e„ŽÜˆÜ´Ð@ˆÞ/g!éÿ9]ú°P·ñ…®ä„¹äîG,Ç/Ïö˜lFÈ8(íâPžvT̃ƒvÎ[¯6B±Цõåçæˆl@N'ï¸n?—ÔämÒ¶ˆ-7vâ*îï²_±«9ÌéžÁ*é¾^AI£¶>Úo?ì›/œ„%Šbît—‘óòRÅźów}¯-˜W»gx–ÜXÒÖQpIï’ ÂôÉ ðù‰eOZªîÓ§ ¨ƒ½Dõ±fî½<¿JÕj·£–ˆãWçýc.Õc<Є.`iž† ^»Çàv·ò3ggLð*ú2*•Ù©QÞRZ0›Ü­úE˜Þ>×Ô¶ æ¼&¡õKÍ;²o3{ \ü<”][Ýšµ8صÛo¼ ô·¢ª, ËŒ7¥²,¼ÐÏþTþ M]ýÕÒ¸6‰òö;Ã4XòØ[޾¨Fó:Š¥ŒØ~¬®í8¶-%à²ßÏCÝ%zƒ6nCÓùg¯ì²žº =Z.€[î‘Fú:ØV÷b#ðBψ³¥ºÖûZJÁÒNÑ[úô¸à‡auìF8˜‚;Ð:Íã÷™È4'§UûM§¯æ¿ò^ Öä!¶î– C" xšmk¹íÜœ•ëä1‚ßyåcK[³å}ñZ–%FZÒäé~LZû1 ÍŽ0åç͆Å\†Ð I"+÷|Ì»sû°Ai'¹É^F|]b„Tì·.p8Èðx|›5³!©³|{x#í~[YXÅþJ†äz9ÓmÌË›éoÿ¯¾í£ª£x–0ZcB“#1Œ±¤Ó&)ܦe³ü˜3$?…²ÒR.ž~ûû’•òö•p-µ–“ˆ*‹ÒbOlûînqtnãZv×}¬¨­ýÊ#s—˜òIzö½{Æ™!/©÷½MV<üÖåµÌY©³ÇÏ:ŸèÇÆœe¶šÍõuFˆ7a‹Õ`Äñv›˜Æšûè-·Ï¬¡E¯>%`†º×C曬ò¢ÈÍÛ¯º²ÛIIiI?><¶¶F&á°\uU7‰¬Ð T³9c†íbã¼g$æùíÓôUU°­­ÝÀ:÷†×ðø–ÌX†aË:Œ›”OÖæE¦È¨UiØ*Ô… x&¶M`§‰"«µ7+leó‰_Û­Õ½Ø ¤¶0SqòÞÊç°Î¶éí}ÜŽ !´/e-ÖŒ4©%Aï;xãËfuªû.`ímuŒ­hñGË~ußNÊR§~Ø2;Úa»Û”“ŽíÃí’ö;½Q;%––ä hÛz2¤i³¯¯—5i§u=žÈtG¶~cÿLêëê 6F.ç9¼åCSôç´[˜G—Î=Z;}Ì€}}‰;C—|ÈøßçÆEâèu!+ŸÐ» ;@sMÔê©3WÅ MtÑ«G('½ˆæ¥ ŒGÇ¿‡’þ¶EÉéRq¼ŒR'ñÁá8ðxÎj*•›Hüq#×ÕÕ‰Pؾ†È…£R«@õô"Õ÷k©†ìe‚°<5ÊÄ#ø£Çz»ÖgfD=XÓ=[¾UGj‘Fw:7Onå½töz«‚؇Óü©?DBÕ—òÎý)mÙybe›É_žì0˜²ÙUÇðЛ:¿/DÎïÊȺ¨ëÝkÌžm?"Oç¡bXCCC}·H>¦‘ë¨ 
Pؾ†Jmàæ†ÍA@õ±pêjGSŸ…lôÁ6¨'ïÀma}Ôº}9íÀŸúóJ¡ä|üع?^®¶S92/lßÌ8’#3ôU-c(qxhÂßÃG3½”<ð븧®‰~ zZÅ£èÓ^h6N·+¸çÃæå퇩8¼1Ì7šöƒ™ŽÉNì„Y[^Lâáë>#‹½LþðËóº$î-„ä£JÖêóýSV±]ÝÕã9ÒÜèï%ûâSþ…ˆÇãˆ\

/£Øøìò7Xõæ©Æ¡¦óò̯ÈûoêŽDuÌqåÌi}ŒŽ½üFg]?ýÿ=<6oļˆõºëÛÞ¥’駃˛„KKG^£Æ05›Âì?ÎBKÅgvb–âOyÂâݶ£º´¤Vnnl/Èmfïç·–ãYõÄQE=å'mÔòá#‡¤D?¨ƒ ú€žE\Dˆ“H()È£ˆ´¿9‡¦ë“DW¶‚â¸81wqú ÷LÌõdýQœnW:µT$²V's­¾ûòö©ï?â¦z¸O¾ØbDã²¢5QËd/Ѿÿ2Tíà^ÙYïÊ+v—‘I<‚Â"Ä̬Y®vNçÄ+H¦#ñËÙ[¦J2‚V±tq¼H;û³VbùËuÂy"ä±×g9'.ÊQ÷dXËp4F]>;sê@º´«L¼Ã£yé›™dÚ1ö¥]åþõPÊœçfRÿБ™”9öìXW×dà½>Ò½?-M\œo¢M7ÜDNžt>‘Ýt‘íµAKäX‚<5Ánb@HFSÆœõó19“€´*4H ŽHIù¢¨«C€ê¶ñòòÚ´iؾÅÁ*ïß&}KõýÎ¥D˦{oGˆ5µ‰Q#ŽøöκÚìŒws¬,»1*ÆÊ$&~l_õIjí^&aáŸëêób*c õÊÄû ÉGãîÖ`£ [ºdM ¾E_¼WÍ|Õ¸:Ou'¯ç4©61ùµ¥ CYU¦>ÐW 2WŠ1ظ6ÒKŽJ­¤_}Ù.zTTö~}IúÅ’¸–wÍ]£áéK׃ê¡ÙKLéj³qR%s‹?TÏ\%ÕËÄ×åìf,XŒ)Pµ¤ƒw ¾ÍA¶b™çŽÉYÓWDõŽØé>ŽnÏÌ—åLÎ0./§/ÇLú¦Að/“ó&¬˜TÐãÕßµs§²îhNN–4n%ûÄ\û®^•™üdª:‘ÐÖøk¸•ïÎm•øÈävó4_?ÝÀ”è•5æJ~ Xv !ñÃq3¢töÛE¬±µVA¶ÚÑõÕžÿÆ¢"è’)G=gI>äz7#òEnFm~ürƒ\ã´Ê%v_Má{µ|꺄È„䃒]mUhÉìh¥ïõû¬»Jž3 %Ä ¬w4UYqzÿC†‡µLɇèë›Gr ^ëÔ ãðC”ÅHd¨€ ú€ž—FDâÐáº/ÝU5¬««IŽ[¹Òµ{ƒÅsrŽ3èæ÷Ó,Û[MYDÕw÷ Ý•o7û½93Ý5¡iS´U‡n¼î®ÿ¹‰îèF0ÍÛGg îŠ(æ5m×%ð2›ååµÍ[t¶»)CòÑH·›1dÇQguýµWrÚs$ru¾äU}÷½û.Ë–ByÕü$¦N·ÛoÿçÜBýû¸)žD\=~‰›»ÛCÖÑyäÈŽ””j%¥v×Yfe»rK°ï.Q_Ú_âA;4éŽ+LL×½ ´^CÛ‰ä/ûhPe¤ª%º‚ž-÷N‰÷Ø@ÑvëëôïiªCIµ£ˆ¨GU.Ö'awÖì0ò½,…»ì¶ËÇä;VUá>VÅdDyÜM\m¯Álƒ”¼µÓ½Þé•—¯”Ž3ŒÌ#UTðó˜?×îÿûgâœ%œBŸ5Âû¤8n÷(íá=þìY‹öùísqkgœ'§ôæ³#‚ÓäМi32úëHƒ7n“ [—‰è)­Xk½pÊ^À¶xlðòÝ…óm†íîµA®ôuV¨Tf]ëÝ YŽeTÆŒ»q>k1—çZRÿøš®ò•™øöqéúéÕõLïmþüÚQßkùÙ’Öþ5 Ï‹[Ú)¡ úݾa„ÇOHTjµ®®T:P}ÀOEÂo3{ÎÙS‡&Ì^„çì‹”¼”ç9ï3ÚÏë¹[Èü æî÷Zî*ÊEl+üd­æaVmï®ãáŽy|’¨gT€ç·oDÑ·À0¦¬Â:r”T‹Çšk&Oë?Ûq¬-øŒ†ŽñÞFrðÐitŒóÞBûRSÑjgK@”ô¢ßógWCÕüäôŸb1-ìüQ£™8GŸJ{ñû7©/_,]´ §o$$"kkk¹ÿÅËEñxÜom4‚ˆPMä>eëþm}F;*r<µU}©)QÑiK–¬…ºªøe¨()RølŽÿ·ŒÕß$ž¾²ƒvz|tIqáO|L$¥Tí);wqZ " ø[?»¹mƒwÚ·{ª­ä‹‰&¦¥ÉªøõH‰‹._îâ¿ÿŸáã' Küá›hsàP\Ø IÉ¿çüÔãø)R+]Ýþùg·‰)_»‹»üYà/\¨äå%Ìë õ TÐ+ “HkÝV†\ K‰afõ§Žö¤çE\9û·½ƒ¸ˆðÏ¿;HrY¾>2òdttÖüù‚¦‘óó„ÿ;–<þLQ1%¨Y ú€Þ…åÓœü‚ãÿ>n’ˆ´ÜŸ”4Ôw㢨¸˜ÇÚ_<àÐÐp¶ºZîž½GÌÌø† &ýIFÆ0â…ó_…*ÝV¯ÃáñP¡@õ½IQ÷5n‰I¯oó1Ùš"$ö»§‡PäÅ ÎÁ~7‘Ø¢ÄO‘XµrCFÆSŸ°Ù³¥ûÿ‚ÃTú¥œú÷ü%Ü$>¨G ú€ÞŽÆÐ!´ÏóWo.øŽš:KH\ú·Ô{XCrô­‚O¹ þ&“z]¯šœÜð5k†¿ÿÜËëòì9‚Š \¿ga!ܼU—ú¶ÀÞ~!™Gê¨>àwBMy°Ú¦M)i™!Ç÷+©ë(¨iÿ6Q¯ùòàÖU2™gŽõtBïÞƒ~ÀšÕrr_îÜuQ[›×@Ÿô»Lª,ÿ"rñžƒÃÚÚÉx" ê ¨>àwEIAÖ}[mmí¥Ð[©oþš`!(&Ù;£ÊÑP—ò4:íí«©Ó—,pøŒ,)¡²z•J}]݈ӉÏ3fÌ•é¥] Ü÷ï×Ç'|´°6wî ¨  ú€?`e> ¡Ie_*n„‡§¥¼2|”œªVoˆ®¦üÉý[EFƧO1EÈô752ž“Óh¼­ÑxT]UvófÈógFéóëéõŠn´ŠrÁ°°ÜK'L1v츱c¡N€êþPøúñXO›‚íƒ^¼Œ­¬(¨þ×€!êxŽŸ·l#VQôâqìÇÌwƒ”‡ŽÒs˜;çO227‰ÏØx¾±1ý855öÁƒ‡EE•:ºüº:\x<î§E£¸X0*2ÿõ›Â¡C%uGhϰåTзЦBû0sòòãâãS^½Ä‡KË ˆIuÛµšÊÂì̬ô·¹Y™BâjÃÔ‡««j(ô‰á…ޤ}˜ÇŸ‹Þ?KŒJz‘‰Ã!eÊÀœ îÚ÷¯²’7=­áõë¢ÔwÅâ<ÃTTT ¦šóL…‚ªhHŠ‹Z˜™ Ú§‘¢Ò²Ôwï>|ü˜ŸŸWú¹Ã02o?ž~ü\d27‰‹‹¾l '54Ô744ÔÕUWWRkj*¾”V”•UWUyù„…%%¥åeÈËHsÊh¡‘Z}ÜÈB‚Æ›3n\³KEùç¬÷IïßgååTÕ×7ðñ(.2LƓȜ8×Pê°ú:¬¢¢®º¦¾¤¤¦¬¬¦¼¼¾_?¼ˆH?iiñþÒ ÒÒCU‡U‡BYÕì!Èϧ£¥Fû€)z^!eåÑÊÊ` P}¨>TªèÃÔcXFΧ·oßå~|ÿ9?¯®¶†—_OH„ÌËÇÃGá"‘ \$"‰¾š ž“«Çêêêë¨U•TjMÕ—’Ê/e•e%¥E…Uåeý(‚¢ÒòòrÃ+’I$°m3X}qÑ‹Ôôçï³ ?~¬ª¬lá%Pø9)‚¼I{›Œ0$%§$9pEHLJY‹öéB(ü‚œ|ý¹îÓûÄœô·Ÿ>dˆˆK«k¨i UÁãñ}ÇÈ5UYOn={–_W‹ B:”[B‚S[ˆCû/>„øØ D #g×ן{û¶æeruJj•„IKc¨ŠÊ\_22¨>øe‰ÄñãLJ‡‡wàGII‰æ $ü4Òs #ïÝËÉzנТºŽ¼ÆHÚ§çn‡çä””Lû4¹D$¼I}ö¨¢¤XMKÛÈ`$'çøL,+M¾ÿÖ«×å**äQz´ƒ‚‚PÿýÅÒ’”I“æ¹x v€ê€nƒ@ L˜0áÖ­[_5D”h§`l'ôõv)ìNú›d] #k‡ÞaŠˆøÄ9‹h7¢¿};ÃÆFa@ÿÞi¬îÁÃ#q fÚ,Z(Òû,"BX¶”.JŸ=;|ëVÙÌ™“dd4¡²€ê€îAWW7>>¾å8OÛ =µ;vòTMU•®©¥¢–Þo9-Ú'%ãí‰ÿŽÎž;_Qv@¯Ô{•W¯þ“—W3ÛVPo¤Øogduu2퓞þÀËëÚœÙu¡â€ê€n Í8ONNΩS§ÂØNèFê1ììå°‚œ÷#§Ø\¿uZ$äMurËÌ~îÌ+›YŠ2½Fûaõ‘‘ÿ$¿,?_˜LæëÒ¥•ÅùõQa ¹[¢R“YN%’e%¿38yy®M›$²sžúì¸mmm&+§•Tü-Çy*))áñxeee0 tÑñÉîÜ0´œ«~ ‡Ûjñ6gˆ\‚å£Q³*¢ [tºU‡m:d¶ùž™Ü½ÃÉTß ¸¶¤ %½šÊ…'¹¤ä„(í4ê rJò‹©ã’Ü®‡–ÁÕTÖÒ›dr×6ëSQ! 
Ì ¥¡¢3b*T+P}ðý899mÛ¶Í‚@ €5àÇIËÉ?süè˜éóÈýøÿàd\F6 Þ%>àgó:3'æÎ cÛÅ}Ö"†Æÿ <²lÉ¢ºEIqâÙs «W‹ãp8¶.xþòÿø=mTjU^Ö翨ÆÍÓL…ÛtÁq;µýßÅý ÿÃ=úï„¿¤qé±›_¡_2jÖr}.2Œp»cäûÉYuÉ=ï…^Ó%‹ã—[³<G7©>ÁA¢”fè©îîå»ç«š½ÂT²á- ,Äim-à¿t™;T:€^«ú*SâîEžHÍ+AÕQ(*ÚãŒÆhK’ÁøÀïHñv AgHc]LÂÖöw.y²N@Û!­¨â§ú”îhóuw€¿W<^ µ¶îʹÓz“­qø¾l1ÅÏ9Y—ÂnZ˜wèX]ðÉÐY³„ðøÎ%Ÿ¼±¡7N›C5uT–A\IzÍåê#å•DšærvžémJU6ftò«Æc•™i§N??³-ähµŠÄ;=ÈõŽí ¦z<Îg3æSwåáéç›–Ç"ïäKBþ&ËÙZ+?ä€7±ŒOb0kÛAÏg =>~¯üùñû2‹ U(?:ÚSV– 2”/4ôˆ™™T=€Þ¥úR‚—²;À<Ö0vš1A†¦úJ²¢æNò`:꯿µeL¯7Qmðt¢]ˆVDñÓqÐâîGÿŸ·C/¨S/]¼gwØ#ÁRþò¬(^M%òPàNßàbØíƒ”ùEÁÊ#ÆÝ >˜3\SR´›­õ¯¼<×€l-#¢©éÎÖvœÖc4Z;‘eÜ:TTÜ:SÇè´·“ADq‰»bGI]mw]úþ %eÇO!w:@vìÂÙ“¤¤‡B‘è-ª/líP3ŸdÚÁøª%šÜ-θûB(ÄììûÇ.¿Õ5DU˜ó8ÐQÇ)¬ejv—Îbž­|îÏ£î‚FìˆXQndµµÉËœ€gS>ØéMIot¹öíýíJô§Gñ^A×häv6Œt|‘WXV£·Ó×|fao7ôˆF# ½ 𦉳.¤9®Àïµáo$@Ïêý°Ô]—¡ì#Ëíýnµˆ®Â²€Ó~ þú†=*šò8ß@/‡qšz4¥SzaBÊî"¿ÙFî—›¼ºJõ±a=>ks#WÏœç•ÙòFO_ðš©Þ*ØÐhÑ3Kœƒ_4[Æ?&x©{0J½hglÙlÆaNg}T}L–&  ç4ø‚Çól†¥7‡"?qí© í:ŒF+FzMI´[KOŽ[x¡QW'ß³“k¬T»εu¤/.敟·Q¤Ó>ÞÉ£ãF‹aèò:3«--üx^½¸I¥‹B¥üõ§IsC›¬ªà}íŽû$™v=w\¼Y~bç:µŒ¼ãþ»Ç´ßÊ»n&1™ ÄÚìœí’켽Ⱥn3¶EM‘™±i~Û×°:Ò4:ŠV´þK ž×²y—î„SDôÎÏ»Sï hoDÈìQÅu2;‰êj=z¥eï’Ÿ™ýí¦`ýjŽ}ãF¸ýÜÙÝ&M1ÅÄ|Zí&öûš%34bÉá!¡TkÍçJ1ß`sRçýãÇ †GÜž7T@/Q}e‘> Éçv-·µäkD|V6«ñ|O 1¯gI®HNÝÃl|W¾;¦?p¾Åà ýu1Q[õ¨TÆ,ó‡kx÷ec½ûþ´‚̬Nê'œ´Ž'U^P¥µž‹NWwÙ1HRürMS{Ö×ÚtÙéWXèFÃ÷¢‘¬¥¯òùð«éÿNFß‚¢·«33ㆠ£*îë3ÅϽõ¢c·15gð5¦V ^ahç¤íï47¹ö˜J;F­¡–Ó¿6››zGæ`¡´¸œsÒ²<¤I>$g¢»DnN¾³rKÑUhãx9äv1ËÇbýúú$§Ã6Ûh\{z/a×èæ`Íôéi bˆ¢¼‹Z–'–B|¯‚ça#”yÞ^Îê(]Ó†çºÑˆË¼±^Îd)3wÊéËäo×óxF—©Šÿ§Ã0nɳcã4æëŠí˜ð*dÁVÅn²‹•]©/ùC+Úu˜k¬TûÚ:ÒÄmÅR=F¶°C\53†f_V$W`Œ’V{yíh OUžà3Ùo¬%»P/îÓ§Åðzs =&Ëv¾š¾rkïæÈôÊx³Q–ñÎ√ãÒ±‡—ÎH¨='×*À7k4‡øÒ·Ò2 ýxÝTн†WSGº3^pÐâ¬+kÙ2žÐðô W)“='æÏ´1{j*Òô~gj C|^§%äÉ]¦"¦¶,3ßH”tôwÔ# ýH^E‡ãS0‘PTy{ïSág1án[j2îée55€û}Í"knjÎŽG¶†€„»^›óFBr09€^ úˆ¬Ac”~w ”<¤·‰i²ê垦þ²â¼›wßÑ…ÑÛN¼ôÔklÙšE]vauk 0vÖG®ÑÈñ¿3vªL!0ƒsÈÆ6áëo‰ñcõ!‚Ì´ w×Ñ‚Í8·²;Ý;µ“#Xk¢ŸØÄìý›¡Y³w/£™‹‰”a%!tüêÃon+¤¿å±»sâ;a‚à H»dáÍ#³™!«ÙîÑw¡%Š¥9ÿÚ†aÛ2ãBCcO®Yþ©äsQ}D(%ÆÆ• Ñ”Á6¥‰™£¢QVj{ŸÞ”i(¦ä£?§M¶Çp ŒÚÀÊ£'ôæ;B&³==º7–Ñ!‰*r(!]îþiÏ…Lä±í”‹áÂvµUÁËG)jRM-×¶“‹¼Ì§Ùçrj“[yÛ娒h:¦²5æ4ŸÚ"(nbVÜ®f~T4l0úRÓè,Jï½”ÐaPÕ_ÅQYö[³@âvŒÐuD;°Ü¼n±˜…Âsõ³°ø$TÚY°] ¤ÝO"±7¯CU –Âj-w t&Ý«±…ñ{b»âvs­]؉a›‚ÆL~Á§ò.òÖ1$0cXYÞþm:,ÞD.Vñî †ä£?wæN£ä’]l›r‚qwÉ€4Ì" |´Ü>À,Ä©Ôñ^`YÈuó¯»;KÔÕ#àWó!¯°¾¶®ƒ}ï®l[±5‚¦µ&9Ž`©–ê’ÄàS¯Xç.çù˰·ÀG?uØ7+êdæ[DcéþŽ_ÙµôÓæòE´¿lJâãî ­ª"ûsQÝ·fô%ºj±î Bò¶.ò‚tßµEŸ‹„$5§^pxS€ƒd7Å…gÕÿEõ½aή¢"1,,ê @ïP}H~ßÓ×$f§‡/âÁ9ÓA³ÅËÝÒøåc†ûч¨1Æ›é™X¢ !(Ôeë}æ€CÙÛ­½S‡’Ù’sß"zƒ_âZ= VB‹¼ÜèßãgjQš›žç/$¸ë²n]powSo[ Ê¿Ô2,çÚxDŸF±áTùíÍݲH;ÃÜ“í<Æ3W4«ÌLÿH%òÊÉI~oëµøn]­éoy|a=kšSÉs3¿$úw÷"0|ìý°•¡hÉÑnî5¥hšÛ¢Á…?©ó±kÁ·wÖ¢Û|‹ÝÝÉßÐT•‹kyEä$ݘk_Ó•Fî ~®ßØ­Wùø3»Í§©þP W1c8‰Ö¦ èv^¼É¬ÈGºl½™°‹µ`ÎÕ¿¥¦þ‡Bhñ»Æ ³¨ŠëúdtgÍH#ßMR¸Mn×r}&±±esSoóKôlŠs¶×B·.Áà)ÿ*Í¿¸ÐX+K ©´%7ý¦‰;G¾™(¶êÐ[ÉÊÎåfcï8¥i®›W6¿™ó=‚Нh(Û täF䦅Bô~9 Iÿ‡ÈéÒ‡…º#q%'Ì%w?b9~y¶ÇÀd3BÆAi‡¶³[õÁA;ç­W›þÖwðk1"²õåçæˆl@N'ï¸n?—ÔämÒ¶ˆ-7V5\Åý]ö+v5‡9Ý3X%Ý×+(iÔÖGûí‡}+¹¼ÂÅ…Ý&Hr>%‹‰wöË©¦µ{ŸQۜȉ7” tô <8)#q²,óÑö(jÞ¬“a-žÓ&f§ŽOÕÁâ¼·ë®ËDhì[ÌZ©E0™çŽÉY?DH9ªÄñ餮1ÈÀ{}¤{ÖM¢¢çµ pÁ•–(5ƸèˆË Gÿ—-Âw9l¿ÏaÀšEJ²>ÿS%ÔA€^¢úX3÷^ž_¥jµÛQKÄñ«óŽþ1—ê1B°4OC¯Ýcp»[ù™³3&x}•Êì hÕRý…ñK'¦·Ï5µ­‚9¯ÙfRĈÙ·™=.~Ê.‰­n­Àüºvû—âVT•Ea™ñf¢ô@–…úٟʤ©«¿ÚBºí\ o¿3ÌÁl%½åèËÀ4¯üYʈíÇêÚŽcÛ"Q.ûý<Ô]¢7hã64ŸqöÊ.멫Уå¸åni¤¯ƒmu/6 /ôñŒ8[ªk½¯¥¡,íôQ]Ñã‚„}Ô±á|` î@ë4ßgj ÓœœVyTì7Až¾ÉÁÊ{-›tkí“6rq™:nY+/Ãs]Ùûż;RHhÃñû˜1lU\ñZoFäÓäé~LZû1 ÍŽ0¥ V€yÌ®Îq>±ØŠëZ“}'KøJ®x›¹G©³–¡~;YÜüFƒÚÎK>rIÕáj}¯®wLl[¤›ì(QˆzôZJKK¹yØXýõKVÊÛWµÔZN"ª,J‹=°í?º»ÅѹMƒÙ]Ò¿¢¶ö+7ŽÌ]bÊ'éÅÊ÷îg†¼¤Þ÷6Yñð[—×2gÌ?ëP|¢cvþ•Ùj6××!Þ„-VƒÇÛmbhÜ>³R„½ú”€ê^™o²:ê“çâ&×××UUW“¸¹ÜÈe%Å|ý:«ÙN]‘ÔŠÌ_¬%úO³ëÇ¢Çzê×mWwóxŽìØGÅ3u›(”$ÆŽÓ<®+jyxË'·@w߬»6kÔš÷U/xnI—|È%ìo}~ì)ëUÇÌV€úæ)&²ôvDÅ[OGçÝÎÚ µöÒ‘—DnÒ\mo bõ7V»ì´sÜæç8*¹ÖöGfíâñ8nGYé'>~1¨‰½@õ1P™± Ãv±áQÞ3óüöiúª*ØÖÖn{çÞPã‚íC² Öu7)Ÿ ̧µ“ȨUiØ*Ô… x&¶M`§‰"«µ7+leó‰_Û­Õ½Ø ¤¶0SqòÞÊç°Î¶éqÜŽ 
!´/e“Æî5©%Aï—ýh±I‡¹ÖŽ1Ù‰a“êˆ*¾ÿ3¿u÷LÑIñîÔO;¶Ÿuiÿ+²ÕÞV{;ö摊ý̾Þi=z-uµu£¹sÞò¡iPàç´[˜G—Î=Z;}Ì€}}‰;C—|ÈøßçÆEâèu!+ŸÐ» ;@sMÔê©s}Å MtÑ«G('½ˆæ¥ Œ¡1$]g(9]Ê"Ž—ñObã™ÊI¨¡Ö‘ºAô¡ÚÚ:.®ï],‡—§© ¾$.Š®Ð2µ}úö³ø…6²• " S 8e9pàDm¥ŠjpÛV«uÇ[q îº\¸·€2TDP@Ae¯@¸7­ƒVPÆïû¹–ËåòÜÝŸœÉçÆ®È‚2Ñ… Ô‰¸ âèž„Â)«nüpDeWÜêõ+‡m´áåºaÿ‹ãj–ÿŸÎâÜÌÖ¸íÂèËüM¢N»¨ jÄšöÇ”°º€½~é9þˆ…);uýúG‚pJ—Vå¾!"æÔM[‘¯©ŒZ]ƒî>€Ö”ú¾¯Jüz›†ÖI“åö"há„÷Ÿu}~óܤ°òš¹h߸/Í¥1÷´WËoïGF½½ÔÐjYò—^ÚênÛØFWÚ:ÚÛÂ&z¯ûË{á)ˆjw·¼|¶eŸ-¯[Ö¿-éÂÿ©{·îEeÿþ_$ö—Úüp*Õ´<\SÍ`4ÏÙÍt‘ªªÚÿø/ÃúÓü[ÅèÛ‰ë&(è)×V±ëz×5þ7‹wÖ.M’·ºÊV—=Öu3ÈêðpÒ³zsXP A¨¹þØESèŸþ§´Ñ‡¯­\×î D)§~²lx„%÷‡R—¯½$)›MÒé,ì†H}ÒQÿ±£©ãaz†>÷ mÉ%0ú4¹oª¥~kR=‘ä¢O=k¹4é‹ýq­P]mhóÿ¼ÊH§¥¦|y>1‘;½„2ެ\.‚ÛrWó#ÀÅð¸{ôÌRx=øó=u ĵz›D,}àH‚YýévùWV5ñåoS»G}›Í&×ež¤­ÓšÒfuy‰0Π7Oê“–V,(|ò¥Õý°Ë5?þÎ$³‚»ƒ®‹›a/I榞ę0‚ˆ¼W»*øÝ Ì·×mŸ~‰õSÀ@Ag¬ºÇøÐÓÉöƸ˜¿ ã2BóÄ!ÊÄGb§T]ƒýV±GPÌ="úQµÖÐ!Êõ-¦Lè!:Ø*Ðñ«êÃfSÙìZqqyì†H}ЂôÔU#ÃO~î«yU.ïGܼ^Jóþù¬Íô+玑RcV­Ù`7÷ñÓ- Ïk ~\»šâmÕ‹¨t• Në+ãwìåUñ;šÄmÿº¹{’õ¤È…V¦ ?\J¹`žÏ¼¼Þ{ÅêÛ4kh³‹½»¹:›ñ…²ççt’Wj®"«([¾zy‰$k?zSDvïxT"f?‹²ÿOꯋëï\À¾pçPr¥å„ß}6l¤lxFç±.¶2 ßdžª|xôÆŠ “Õ¿î{Ç‹,š¢û R´,I 1–¸ä›—Ïe»|t³Ùwãg7©)†ÎôøÜ韙A¼ÇÒøÜ¥uH©iʧ5z–¦î±/×ã?¿ücÙY/(C‚Gô˜în]YѪ¨Yêǹ?{öùÜÅ~s3Ò5´5›íã™FïÜ™‘šÊÖÕýÈi‚¶Ë~!—5½1™¡«¿|Ö®pç#dÈÇž`-½²ô½)òK£>˜ò9»iäÀ–x磻–ikvÆ>€ÔÐTË–-[²d êðèêf=NüTêkËè*òÅ#V¬$ˆºCQØ,Žô¶–øÔËÈZNFê#÷AÎ͸*ÆÆZñ÷Ó?šúÚ®ÛKY-ÿâý-4# x‡§~H(áÁKoï¡Øú Å °ëõûªÕ†½úÓèŒv¶i ÝÉñ¹“ÿí«²Åiê鳘Íy«I3³‘/./*æHJPÛMy-›ÚKù‘SÁccéšš2’RJØú å?<„…Í{Ù&^¿dÖÏÕª­¹+Æ×ϯÙ[vt4<‘:z´4ŠL’ôÈÈ ïi^(R|#Îýlƒ7ÿõêyºBÍ^ŠØ+á½úôg5ÿUFÌ̆%$®NJª04mýu(¼}LÚêçˆ%'}E¶­ŸI½ÜC÷·ë·;,xÐðÂy‡~[åщ?Z½ùÈ$Ÿ+éšÕ>ôÈvS)A<+[o;{v 1ﰯ记egßÖÏ¥8ïÐôU ܱò[çYÖÇÛ¡á>l×QïÚqzꨋ(¯ÂÛg¥­Î„É­òé–¢u«zvžÜ¾}>{³^â¹) t††`¼<5nÂÀ£ÏêŸ3íwh…Â*çƒq„E\ùS~³)Åo\Ϙ>·à@ê€ïAEIÑcô˜£·;Œ›J¢v¨m/ÉMO¸wÓ–OK/HRJiòc7o>àç/Æ.åMY6$xEÔd„7ÑžúÛ¨m—͘—5§z•‘Ó™Ü(‰evxõ¨¢Jë–Ú+Mfqà‰Û³øAêü“ÃvN8av4!áZ20©|+?æÕœ˜¿Ö}õ Cæ­C9K<”î+ÈfsçåÔ­ªój^›a¼6‰œXs•­a>¿ËC=2ïÖu“;yEäö¼ë£fœ=¡á|ßHi)¯"+Söì¹G3}°¯ õÀw£­®6釉۷mê;bK\²ƒluÖƒ›/³³¾Aä“×ñöž¶nív ï¡ÝŠ…¶‚ÛH˜lFlË!ˆþgCzÎ,dšZ­³9<;FðRVàýÀÂüˆ É–õ²âmE5QÎdDúG¥.¶¬†¥Ø­ø“ß³Gðî…h~$ÒIÞá\ú¶[·ƒ »ÖÍ¢uu¸ g»è¡«¼Wܘóòð±—3”?ºæÜUmh“PÖoCÄÅ™OË ¢ìÀÆ›‚… "—ºóЂkÂÒ}N Þ¾-š˜îç·{R|g dçÌ™½aãfc»íþâ.‚ˆ»tRRRb꓾årÅÅæÌñµsïAe͇OËÊôeû0×GqG4,B· ÓMŒÅe&%MŒûÒYu×§)e³?ù-BpãŽü¼ªO¶òÏU­÷Ñ›~ÐEêú¯Gp X•S¼~Æþ€Ô­ƒNŸ7ÇÿäÙÈèØ›6CÆP(Bír3kŠ^_8¶oâ¤I*JŠßá3[˜f£@{B¼Š+àpÔ©TÊ_Ržþêï§~TòÛº>º²Í“ "Ÿý½¯QA—ãžlZ7­,dù1ÞO§æ’d}î{¼7Û¶¾[¯üVŒ GqèÐÿpiMVþD̳÷Z˜2wf WUW»AÝô±g õ@ë2ÄÙ!ÿuÁŽÿ3î㤤¦Ýž6BÖÆ^:%++ø=Ï1+*áG"‚“źwg‚ƒ> jªÍVÉŸ­‚MaXÛïuì¡sjóÃWx¯£l™}”²šD¸ç‘¶õÏÜ1£Ýyoy6#³ÏöfDCoßQŸe”÷k]ù‡¿)¿ßN°&D%»Ñª¾¨äüsým—ù*ü} Uêm¯K\zB%”ùú U’ë$0ïç¤ÇONíÞ`í2RZ¾Íß`B7ÎdWVxMþ)ú}ïQ!½4ž\Z7îž•õ`Õª“£F»¤ûû³ñÏå«0åhÈ”4âÿn¼LðÃnqÀµeŸ]ý¨B¿OÝ‹OÊÖ½~M¸3°–ÞYúá,ïM¬ÎCtï·â7Ñ)AråС²¼WF…ûrSŸ¾³¹v&¤>hÅ õt æ?LMû;du/×QrÕÚäfµ)·¯¼ÈL÷úq²„8«µ­ªªñüùÆ9ÙÉË–=ZºkWÆnª²És²›oýir²U×þ Ú[´`ÇûÏŒŒº>¡©Ú}­%K–¤e>?¼g£–QO‹¶r¾¥ªüÆÅ“ÂÂ4ϱ£tzk^Ueƒ%K ò^=Y·î±‰Xÿ~¢ÿ¾È¬ÀèÏßOÊrIDáæZóò2y!§e»ì9£Gy1D%°¿ õ@[¥¥Ö%pþ¼ššš“ç.¤>Lîé8DV©K+ {$'=þfJb¼Û¡?Mù± Y^AwöìE$‡såÊÁ{±i#†Ikj‰´ÎU%IzL åæ­ÌÁƒº7;R´—Ïaáán.„›KIYùùÈKO&u5ë¥eܳ5¬›PuElÔù¼œì~Žî.Žwh›(Tjÿþãú÷'*+Š/^<›Ñ«·„­ £5t±VVHGœ}•–^àè`aÓËÁÖ–Š©Ú'qsÄ7‚;D|ÒÛ×o–è˜X¨u3¥ ÃO¥ò¤{73Ÿ¦hwÓïcÓû‡qcÚS‘¢ŽŽùéõiÚ­ë17òóˬ¬$--Eh4Ê7[¢"™èèüÄÄ|}}Å޽̇ ÓÄû©:SC}î Ï}•;66%9IH˜¦ª¥+×EKZQ¥Ù:©Øor3³ž¦dg¤ËtRènbÒÃØÀÄcXG(²¶–wŒ¼ˆ‹¿–˜A¤¾¾¤ŽMMjž"WV°ÒÒÈ”'…Ož¼•“céÚ¹¹I¸¹á€Ô@J rC]œîP¯°¸ôIZjVÖó¼¼—Eo^“$Ég‰KŠ2Y4CDTTˆ"D¥R¹Ó9µ²†SYYή¬*-)*+-®,+e‰ËÈvRRQVSUÕQW¥©š–¦¼ÈRÒ‡€6(/{›‘™ðâyFNî›üüŠšޤ$]JJ„%Fc2¨¢,•BPi’CÔpH‡;Me%§°¸ª¸¨²¸˜#.F•“SQVPUÕìÒÅÈÀa`ˆ÷2R@#Š„˜…©w@)Z“%£¯ßWwD@ê¤>@ê¤>@ê¤>hƒ8$ù,çÕÓ´Œ—Ù/ ^¿*-*`°ÄEÅĘbR"¢¢4SD„A¡ Q©Âdm-‡Ãáþ¿ª²¼ºª²¢¤¸¢´¤¬¤;YBº“œ¢R—Î] uµ$%ÄPÕ‘œ‚·‰Ï2_¼ÈÏέxóºZLŒ*)É„%$¨ Ád Q„…)$IÖÔµµdY9YYQ[\Ì)*âÖ°«Hyyº²’DgÕ.ZêV,qy©àû(++[³fÍ?§/[¶¬ñCYYÙž={ZZZ¢bß:}dRÚóøûq/ž>fŠK*kê)ªiJÈÈ+ês‡¯oÿi)çí“ǹ™©¹é)T]ÏÀ¨—…™Œ¤DGËxyy7bãc‹ÅŨ¢::"JJ4iYÂÌœ‰¿2¿¬­=–‘YýäqÕÃG‚bbÚÅÄØARJoo¤>€oÅbq³ÜíÛ·?3¶¶vuu5"À7ºû 
1ö§¶VÇØBµ«¡‘Í@îÐË¢ Qå:«q£Þu9°°òéÕóâ”UílzëhªµÓ¤W•œ|ôÆÍŒòrN¯^bææ¢ÎNLîЋRWá âtUûôµ«å·ï–ª(‹Ùô²SÓè·=R@ rrrJNN.--ýè³™™™¾¾¾(@KËy]x>òÒËÌ4} ;µnÆý4¿Ï Âé" }K;îÀ/ª©=zúÅ“dsk{+aá6ÿáX^–zåÊé¤äÒ¾ýÄ--X†¾ËjˆÐ…úôãÜq'öb䵨{¥½zéô²&Lc`_@êh~ÞÞÞ=ΓKZZºwïÞ, Uh!%UÇNŸ}™™néänÚ×¥u} ÓŒz;ºÏDÝ}xûš½ƒEÛ똪­y{áRhRb±Ç(W71îÐzÖJ¥8:ˆs‚(MLÜ^ܯ¿‘…ÅPìH}ÍéSÇy Ží´°°@‰ZÂ݇O#Oëjnml7иկ­º¾ w¨®® =rªðUö¤ ã%ÄÅ[‘s²Ïï?pÛÒŠ5pËi ³•¯m÷îLîÀá¼:{nMzÛsüd Iì)H}ÍãŸÇyâØN€B’äù˜»ñ7®Z: wž8³m­<&bdãȉ¸“{Ãö\ qVë¬ò£ÇûNJ÷ðž;W±m™J¥8;ñμykß…ó%þ~ÞÈ~H}Íãƒã€ÿü­‚µdÉÔ Y¤åäܳ³ßˆILqÉŽ°½†½3=Þô?ŸŸ¦Q©Ôo³ÐÒâ¤Mÿ;:yJ'¹N´ŽPd{{VrråÆM+§{Ï¥ Ó°— õÀw“šqüˆÓDªµãlµZ7cq¹5ëƒçøÍú§ù½Î¿±cgäœ9Š4¥ãÙÀ€!/O]»~¥¿ïÏ4º(ö5¤>øR³_ž9rÀq¼w‡Š|2 Ê&}oþŸŸÏŒíñ+.Jضý¢¯¯B‡Š|rr´±£e6nZë;k>züúà[{S\vxÏNÇ1Óh4‘ŽY….šóÊ­»öü4åÇZDuUÞ_˜>CžÉê˜EîÜ…îæ&³cÇú©Óæa§hµ©¯üÉí+Q7R_•!%e`aïÐÏB™‰â´a$IîÛ·ÏÜ~]´Cÿƒ®¬­ÿ6çyøÅË®Žý[¢ýƒ‡w:¹HHJP;r‘uu…33D/œß=`à$ìz­+õ= ›ÙuÂ&Á¸©Ó´‘Ô¸©¯03j¢[`¢íÂËQ¿ôkõ%ªAŸpÔ<²àž½Þ0u"®^gIH)ªi£¶Žçöl²05•ë$ݼ-ßßK¥ÆFøC)á8@äÏ?_§(*vE5ZKê‹èîº*‰;²)¶b†£Ñ3«¶ÄËýZJ㲯î>‘b5´+ïüìœ;[½,§E4nÂxÂñC[Ï–?ØÀ2ñ%¬ô/uðøµa–ñ!÷C?ŸÐ{ÐÑôúI½R®®Ôåù_°ÞNfv41ïP„螟–EdÖÏ¡5ïÀéU£»ñÇ VÚÉE‹¯½^f'[?CÝ ¹ý©ë¥m~ãOŒuæM±âfQ •Ado÷›ì|¾ÑêjÍ 9<µç'êQ¾Ù…ås–X|"‚±Õ%¨a;;{Ç=Yû6xœCà‰†YçíO]5¦î[TuGO ŽÊh¼ ÅŽ,mò^³áÑògø„%¼«Ì†˜°™½›Ö¿¡Ôcœ†¿+£Ñ´C« W9ÏŒ#FÆ•6åßÈ¿2iŒwDú»V4ì]i)O¼ûõZ:8~Bosæ]|½ÊAö_‡ìÜk 'û­>{¿ñÚzýºéÏNܵ(¿óËrw)á~5®¿4Z“¥§Ž-1`_Þä—Ç´”†§ZÇ_<ªR·õÛG°¼ŽrG¼SÈ¿tëæ<Òm+o+²Tש~±È »ºæþÍh‡±SQ . EȨWß3çÎý0~Ls¶KÖ\¼”>eJ'TX`À€NçÏGLœˆÔÐJR_ñµUüÈ7ïtîû‘¯žâØ4rlýƒ¼¥¦ ˸_ò•ý“R× ¾¸—?Ým«óƒ»^¨í‚˜¨_{³ÙE¼©7ÄþÌ&IeîxÖ-µ±{§™ìf¾'±üˆ!7laâsô÷®&RÉóºåVr™uà!΋yÕ™ÇÔ‡¯£ÿ÷ÅSé;q§0>»R½%k\]…{E}¢Ê®Ú ÂÏ•…òýdΰ©Æ‚ æßgÂ4‹ Ó&&Uï6øHQ«Ø¥¼ˇº¬¸–C†+q×åð4óQ[·˜1·hLÞ[FgÖ§Õcu*¼Zxw´Å ^e®rW彞“8MÇhùÓÓ÷®Ä­éû®YW[Þ6†ò£ìËcæJÃ÷β!$†MìÖ„FˆŒ¿'kxìäeÚ‹¹м)gj8ÏüvJÙÁÌ[iªtŸSoüeÉ/náýÝö¦?X)ü>,äáÑ©Ýê~G7–L¸11*»ÜVYô¿½s”ûFZ[+¼ ¿œ‚íe¼:o;ógÙM_6¥R°×ÿ¤2’ÿn©>Ð×}ÕRCVØÁìÇ£”…¿´ÉÃÖúvwNœýûé¡ñóêf~äãÚòËî9a“x‘ûÉÞ?¸‘0X6×Alïò/ÿŽ@yý¶¢º¶}PuTtn\}SX(+Õl‡…¶ÄìhÂk×Á †‚€!ýÃü)>Gоí/1Áu={MmØ‘Ë ¸Í>Ûv{ã ˦|]©f×ðÂ/„ì]"èýiV³~ýeÞ(I§KD"Aì9u}ƒA‰O5fûË@;%Áº ˜2’ØÊ}‰÷¹íã+Â4ö\gëËݨºÌÙó7’ü-ãvxø}óý^¾y[A!ÞÈÄ3"þÆíB¢¯T£f¶‘Pì7Þ–ˆ‹&2Sß6­‘‚w %ˆ|\êοÄ0¤mÕýŽî†ñûÞœÇv½·sý þ5äètº÷Œ8¶ûTáÔ†äÓ'²`·íþÎÃãßx8mjwÑ©¦®óF¸èo;b;9eÏ»_‚`)QQ ïÚÐ߯¸¡vøhò¨™Æ_Üä¡‹ÖÚˆÚ´ìDÀ ¡*!3ç„Ö¶;¢,\Ã~X0é/"ñWO^MÖ†úʵ_,2@ôôQr·ž6Ÿz¶òÅí[qyÜÏ‚ÆDY Šjšjªÿ®çªäþR;çÕaºãñu3©Â]ŽÊ =ßȽ>þÊ{ó¼÷ò¾• zWƒ[wãšñ쾄„,ómõv…·I['ͨÂ[I²¹š54’¾ÿÆ€H}­!õ1ºè¨DôÛ~ß>ÞÍVùÉ·^H›ª4|ž‰}0MLpE¸7¥ì†i¥õÑë%Íw'Q°›+X‚ý+mu† Ln•O·­Øììûùù8±d—ûòwçuO]çÝóñ)¯­9.›–„Ïà룢ÂÉË«$9 •Š=à»§¾º3÷’ÿžkè±ÖË\ÎëÏ{mˆÙ<³7ÿ[µì2mi­ekûQÖ¾7Ïø?bÂæò¾è³Ù‚Þ¥ÒÆÑ¢²„ÿÿÆ“³½©ª~¯™¿Í>¸­­õïÙ=Ò¾!ÁAú¾ñï-ZKðãô…ÇËìz mCu""#ÖUž×Ȭ‹¯ƒ'ïÏëjfeû³{çï–»"ø :“ ï¬Ðà]æÝ•?‹økû¢²úókÛh£¤}7™øF/² ,jx~ä¡“kF ™KÜò“¦øÍ‹Lýg³ï-« \|½jiä¡"«Q6.”Öð ¶D(/KñÖ…Ú=”|a9ÁÚgÓ`ʦ÷·ÙñO;µw›óÞï¨ x€fÐ}ÂtΕFyþ³_‡L²Ùº0è·þ”ß>xFkÛVon! ë›eB™õÞ‹/æÎæ¿Õ¤Mvè¾üðaü³‰Ñ¿éÊÿöÀè³x¥UD  47Dú¢/ Ã)((dI4ápÉ’Ì');U³«…éDùÛ´B~ÛÅ›î¾sbý´hMí*«þçÞ&”±FAw¤ÏêË}øñ’}u…³ÿÍO½¼Zp.tÈ8DZ[bãƒùÇlçg<æÌB,î=B(å7Ó#Üé}¹ppŽwõ8OBFš,»)øKVégV‘FgQ©…Å¥RbÍPäÂWR‚3ú STÖEŠÛ’6M1ü1ªlû„Ù^ þÜzvlYt6…ÿïnô ×’Iå[ù1¯æÄüµî«O2oÊYâ¡T±ÒdvÐns{?HßDrØnà 'ÌŽ&„ç¸Ì¯¹akú†ð“ó†(×r?…¶Î¬FØüK =Fñ§Ì]9ŠW…y;íäˆA%—9¯ö=´œ ãÿ 'Ö\ek˜Ïï„ÄòPφ? 
°yGÖˆrš0î4ÊRÞ ÕÎ#³#x ¯&wiÿ¥^ü?ê•æÕdB¡IJRßeËʨbOh©Ï`ä’\Ó„5—^#—~úiÞUUÈ_ߟ&O~y6Bp=˜OµÌì6‹$g}vÝTV=#W½?IÎfn9—ø+,½4þà üâF1?¾näœwâÿY·÷–Õ”Fª_'‘ÓVü*1e§\ýÄÛ¿à]âDßÙ¼îð"•¡Y3B¿öwô%*áMyIŸ¨‚«Ÿ:{°Iu#õ‘;HrÇóXÜ$>WÏÏNè(ØÕlªp>>r·oûåyÑ•oÒÎ?Œ?|+`D?Õ¯ý*¹}ù§!>õ=Šô¾ ŽÎ¹Ëë*ü ³ùQ?©?MW¡³ñð‘“þ–ßæñ#üŽ[ø‘÷g/ÝiÇ3éŽj›ð™*L¯âý ¬R»ŠM§óÿ'&Âÿ—ù¥—¡—™É¼ÆúiØ2¥þäú¿µéG]^ß³'öÒc†ràýÀÂüˆ É–õ²âmE5Q.H°éw•ºØšo]¬á°<}öª„!Á†…·¯ùœ+МêµÕø¶ÃŒË¿†9„zÊ> ;Í‹dN–Øs¥e‚EÚ­Xø§G}ÀSÖoCÄÅ™OË?µQŸ™¿0*v«`+8*ü­VZµZÄd/¯6B§SÙ•åØ ZQêû¾*ñëmZ'Mv”sPØ‹ …ÞfdÔõùÍsNIå5sѾq_šKcΕô¦õ â;€ïˆN£×ÔÔ|y>Ý_þØ;çƒØ•ÜýÝnðòÙ–}¶<¾nYÿï ]ø?O]÷¢²ÿ/ûKm~8•ÆjRÃ55lºHóYD„Í„õ#d÷gNRú¥‹É'OD8åׇ¨€kŸ:»R¸î„ç¼*¢âqæú(Þ?²¡ÛŽé&Æbв “’&6ü³l0ÂeùÂ/-R{<÷wJà\ ûÎR.3Â&D.ióë„X‚P ÝÐë½$+kþeå?=¿ùñ­ÿÇ¿ÿl·8Ø ú¤£þmGS‡Åô }îÚ’K`ô‰%›á„~©ž‹Hr~aßùŸWi©GIÅ_žOLäÃS…2ެäß …Ð`òóU5¿7ëbxÜ=ú f)¼üùžºâZ½Í "–ˆ>p$Á¬þt»ü+«šøò·©Ý£¾ÍÇf“õ“¶NkJ›œêjNMM³ÞÉûçNB±¨(™àðî¿ì1hBø2OCMOÞÉÕÕñ§éfg¢"Ò ß¥¾‡Á{³mgÔ]èºüVÌìÞÈСJåé·_ðFõ£’§ØÖu–mž|XùDèüo¢Ú«·õˆðºç Ç?’e¼×DZ‚Ð^ü›^Ä‚Ë]Ey¬6›5ÆS‡ÒBo*)= ;‚ˆâmÅ+Ûu—9»½n·`+¡×HIvÆn€Ô-HGUå웼ÏÌÀ®Êåýˆ›×KiÞ?Ÿµ™¾sŢѼ.Rj̪5ìæ>Þâbº¥áyMÁkWS¼­z5‚ã6J§õ•ñ;öòªøuâ¶ÝÜ=ÉzRäB+Ó….¥\0Ïg^^ibõmš5´ÙÅÞÝ\ˆÍøBYÊßäJËÊ7W‘•Ì_¿>W[[«;ÄÚ&äTÐò5”åβm‹Mãžú,£ø¼7Ç’È?üM¹ÑÛ2pØ~¯cí˜Sž¾Â{eËì£DÕ¬ Â=t2˜2dª×=þa–Dè/f‚^OKg»£ø¡ñÏåÝ¢îïwõç6s/lA·^]¿ê—ç—4Œ|ò£•îΣ>‹¶BË¥§&Õè4¹9 YUÛ@ꀖ$!Γ”zóò¹¬b—Î`6ûnüì&5ÅЙŸ;ý33ˆ÷XŸ»´î)5íBù´FÏÒÔ=öåzüç—l";ëeHPàˆÓÝ­ë/+Z5Ký8÷gÏ>†ŸYÖ«é]ÔÕ›«ÈÜ`Ó¹3#í)[GW&0"¤ ‡®èGú}âÎxŒ)GC¦|dzˆÿ{åCÈfÕ»F6Lkhœµô~ÈÒ[{o¢”­;Iº×¿êËósú“L¡W¸ýÔ»ÿ»új+»2ïrös¨ëÃL}ÊÖÔÃ>€Ô-NGÏàù“äO¥¾¶Œ®"_wo:)ËA$)¸“*Ù¦+Ï4èC’}>3ÃÓ§b""Å*»c@ê€G£ÑŒ,z'ݼdbç„jPâ^ô•ÑcG7s³TjÿþzçÎe.…"sëqáB¦›ÞoH}ð­ vì»væ7¹Ïe•ºtðR¤Þ¹ª­×UUY©Ù[¶°•˜øû“'•ºM>γ½ŠºFíÒ¹“ªª)v=¤>øv&Ž²å¯ž3ÚßqžMW’žõÌ×gz µ?fô´à |}˜L¡[ä¬LÉøûO}}`§@ê€oª“ŒÔÈQcŽÜ:`œ7EˆÚ+P]øêzdøœÙþ-·&Kzò£7m>äï§@£Q:`‘ß¾–>pð‘ï¬9Øãúà;ÐÕRàäzùï]ý=~¤P:VgYVpñøA??_Z wuÊ+víá¶ic¸¯ŸœPÇ*rI‰ÔÖífúÌ`ˆ²°» õÀ÷aÚ½›‹y$t³Ã/½£œ~Vö*#æâÙÙsf3èôo°8U5³qãÄþX}Ðg¦Üç/éÙždeÉ:ôØÏ×!* ©¾'MµÓÚ´ys¯A£¥dÚI¼ýòyÆü¹³¿åBåt}}çlܼv”‡Œª*½ÝùÎmfB³Ÿ^ˆý ©Z qñ €€]û’B4³~.íu3…jªÎÚÝ»OŸÁŽ“¾ýÒ¢¬Ÿç.>}:äÆ‚Ñ£¥Ûk‘95¢[B²{öTœâ5{R´.?Œš‘yd÷›!cÅ¥;µ³­ËO}wgÆôiLQÑ︃M{ù2eõ‡Æ—QVjo~‰ Ò—.¥L:É’Á€Ô­‘ŽºZ`ÀüȨë·"ŽÚ¹aеƒzþðNÔÅÑã<m­ZÃú(*v÷óâøØðƒâ&Oî$)ÙÎô{ôHìÔɧcÆZúùÄ~€Ô­ƒ]oîÀÍ~ç®D:Ž›Æ“l£RöòÙµ³§<ÆŽuhmëfjîÊnÞ8zábÒ ¹N²mõ3=ã™ì¡ÃÉ#æŒÅ¾€Ôm/ûEݺ{öhhO‡ÁTÔÚ̪“µÙï%ÅÝ3f\P`@k^Së^ùCRò¥;¯»‘ÒÑmC—Q¾sGôêµ´qc¬çÏŽý©Ú*;«žÜáMaÑÁÃûH‚°pt§‹2[íÚ² ²/Ÿ>®«g0ÈÙÑÕ¾O[)²¡=w(+}ZQU9vŒ”˜Xë=ì3'GîСd§î ì#H}ÐÈJIΘ:…;’š‘yîØ‘ÚZŽ‘Ý@iyåÖ°nBâUjâ½ëWU5´œ:ZÌû¹™%&ë9ÁŸ«r“>[QYåæ&¡¦*ÒJ¾r$&°Î_|¢®&ãè`áïÚ'u5éÓ¸#%eå‘W¯=J¸/¯¢ªmj-%§ø“ÞëÌÇIwo±««-,¬ú[›;ÚX´›"++üø£w¤²¢ìÒ¥÷î¥ué"jg+Ú¹Ë7¾æ'íÑ#æõ˜¬Ò²*kkÃænÝðÅ©: qÓÝÕ‰;pÇ«««oÆÞOLxPôöŠº¶Š¶ž´¢*U¸Y?¤Øe¯2Ó3Ÿ>ÎÏÍգ‡Y Sîо‹ÌeÙÛ³·çsjªïß¿rÿ~âË—eº:†ÝE44„h4J3.®²B,åImròëÌÌbMMi33ÃnÝzuë†7;Rtx4Mpú_Ô’²ò‡Ÿdde¼ÊÉ)*xÃ`‰IËÈ1Å%X’ ¦."ÂdEˆJ#ÈÚÚZAÖTUTVWU”—•—”¾Î¯åpd••U´µ4u5ÔLT-[‹[dª0ÍÄdwhÒŠSSã22Ò³_¼É˯`±„äå™’’t))š„¸0C”ÂBªå&CG¨¦–¨­¡”•“µE…5EÅUoßVå¿.gWÕ*)‰ué"¯®®£®fflL76Æ;©àKÄYLKscî€R´†¨„¾~_î€R õR õRÁ®®IÉÊÉÊÊÊõªèÍëÒ¢7܉LqICTD„A¥P(TaáZÞÅ\8Üÿª**jØ•å¥%Ue ¦˜„´¬´\'µÎ]t5Te¤$QϪªxž™÷üyöËW%ùyì²rR\LHJŠÊd ‰2„D™BBB˜Fpj)55$§†,/¯­¨¨-.æsjªÉNrty9VçÎ ªåõPO¤>€Oz[V~'6!õabñÛ|UM¹ÎŠªÚ",9•nÜá«ZΪ$îßOÍ{‘ñ2#µ¼´XU³«E3]MµWb’,,|pÿÁ„ok8d7}Qmmº¦†ˆnWŠnWî÷é¯hºˆ ¢Þ\NM­zô¨2'›­¥%ajj¦£eC¡RñÞ@ꀊ$ÉG™9·nÜÈÍ|ªÖµ»–‰¥¢®1wh‰eÉ(væz=lË âü¤”Ø›•e¥¦¶V=tz;­rõÓ´ˆ˜ë_¿®¶²·¶böí+Ü·¯|‹YZØÒ‚;°ê'¤dç$^»Zú"›Ý³§šeÏÁL–4ÞöH}Ðþ•W²/Dß|xï¦jWC½žvfýݾËj(¨jqÁøÍÇÏï_=G£ÓÝ\]Ô»¨´ƒ"WUd^‰:þ ¾ØÆV¼W/QmmÙï²*Êô±cek”ÿzÿÁCÅ•¤›«“ššv¤>hoj8dø•˜ä{×m¨w·à­gݤå•ûyüÈyUY~aÏ¾Š’"QJrÚ\‘kkÞF^KH(9RÚi “;´žu“ëDûñG^ø¬bß9~"òEVõHáŠ8©ÚÔçyÇïSPÓ0´²W3ìÑšWU„Á´8Œ;ò87wïÞ}š::ÃÝ\ÚD‘³ŸŸÝ讥¥˜£#sÀÅV]dºûP)îHþëKÿûßQe™Áƒ¦âô?¤>h“®?xtíìIÛÆy·­5—–Wr3µ¦ºú;vSò§É?´Ò%Éû »ÎŸ{1z´ôÏsÛV‘å:ѦO—ãpÈ}ÿ(( }~š‡ì€ÔmÆýÔÌÓÃzâ2É· FÒh½\G‘dí–]{éT¡‰ãFQ[S,IM=|ðàÃÉ“;ÍŸ¯Øv‹L¥RÆãûwäÈÚÊ Ú˜1>Taö ¤>h½rß…íÞÙÕÌzלö±EŠ•óðšêê…hëèº tøî«ôæí;Î 
u—Z´H¹Ý¼sFŒâpÈÝ»×té¬4`à$ìJH}и•‘’d?Æ‹*ÔÞÕ¦ÑúŽøáeæÓU«×þôÓT qñï³$yñâ†ÜÜò¹så¹q´™J¥Lž,›žVòÇš_¼§MWÀ>€Ô­EYû[þ2°°ãF£v¼™ŠjÚ žš»Ãöõ·±þÆK¯ªx¾ù¯ÝÆK9mÇEÖÔùy®ÂŽ»ôõt­{ Ã΀Ôßßóü‚Ðí!vCÇ‹K˶û¥P„ì†Mxpíì£ÇÇ wÿfËÍϋٹóòt9q±qÉ“É?Ê„‡gœ82Ô}v1¤>øžŸf…;ää9ƒ*Ü>SŒû8g>Œß²c—÷7¹¼ç³´“§#çÍo‡Gu~†««Xl\Õž=ë&Nœ ©¾´œ¼Sï0nZ‡Š|jú¦ÕU•»öøaܘ]PNö¥£Çøû+P(”ŽVds3‘Š òàÁ£GÏÄî€ÔßÚ«¢âƒ{v8Œ™J£3:f´M­SîD:~r”ûZDQQBXØõÙs¨TJÇ,²MoÆ¥K•ááÛ]]§`§@ê€o‡$Ƀ{˜Ûa0;rºZØ]?½ÿÎý £¨rMhØ©1ãdh4JG.²½=c×®7É/è÷Ç®€ÔßÈéËÑâÒ²ŠªZ(EûÁm73ÔnîÃ\¯\ݦ¥ÅPíBG‘=<$6nŒÑÓµÅ=Üúà[(¯d'ÜŠ0Þ¥àaŠi˜œ>éîêÔŒÍVUdß¼™7¾"*ÌÅbQ{˜Ë^¹zÐÁÁÕ@ê€w1ú¦Š¶]D¥Ð4±:ú¿Aš±»ïú­Sææ¬{:ß?ÙØP×­{Ö¯oM¼tR|ké’Œû8µªUª|“ý¶¼F\VMü{œfH£3U5nÇ'öîiÚ\m&>x=fœ Þl ¢BêbÉ®uw@5ú å¼.¨¬(“QPþÊv®/Ññٚͳ ¾qÈëßöæßÙwô–ÐàŸÆ(Ó‚RxdŠÎÚ[„Ùü¨~=¾KY”Ôµž>}Ò\©ïu~€Ô-«  %)õ5-T>üsÈŒCÜ‘Å1î2'Æé‰[elöÜ×N¶až’û¿Û9/皺yvËÞ›Á{àðë­_læy®ŒÎâ>ŠÞüÓ¸]%L­kóæhòúú|#w‡ûÉ-;Mf«£Â}Ä-ÔÚ>ˆ;2ûüOãªÃã•WFò¦w±t·ë©”uúÑ™¼‡nÁq¿xèý‡bJH——5_‘‹dešç3ÚeíÒÍÝ.k¸DÙIM]µn©-룳å_:.ïpŽ?ªééÛU¦òypHÿ¡~džŸ½Yxû´´Õ™úÙ‡OëJɺáòåÓý£ãæ­>äüË(þ½%8É¿ñjóë-O#Ѹ5®¼Èg¶úF¸O]ÿÞ‚5ܧ3øŒ¯™®æKÏÿv³(BT!!j%›Í 7ꫪ¢Ís—Ò¢Juçqä3m-Ëìf_Y<÷Ú2d¾NüÈ7/rý*ûºž¹?·¾K\7õñ¢%§¦Y"¯Èta6»²9mê–i¤†Ÿú¢àåk('‡ÜwnüdazVï§¾[VCÁ¹úN²&6'¼É®!Tê¦éiÒæièwãovÙ½Ëü~½˜ý,Êþ®Âõ'¥_™úª«) È€Ô-KB\<ÿÕ«ÿôRvøÌû²Biʦí?J¾‹ Äð SÎø»íKÜ;b|χ¼¸NÏi¤°œPª?Û®&;îZ !¥¨c®/þ¥%‰›ìXxaò¯·fûî92/×?4› †…îâ_3†dÊëv&n½è<úïÓë]›«,ì²"QfóY"+³¨¹uòf„ôõ]ê¾á„4åÉÔ µ OÐÅ)÷mn‡j˜žr%÷ÃXV¥IßDTº‰1¥š^³Ò¶´Ä{¯¨.!Ž»´ õ@ SUQŽ»û_.[’´møÂÃ/ÂvË…–ÈžÕ;u9K£t”¯Ïï;&‡Ì ™¶1ieÙ{aÓÜŒŠ_ëµô!!8[OŸ`SøýƒÑ»6¤»jÊhwýçâÌ|yŸTÛí="š÷pöùmÝëB}À‡CÆizpد'XhK§]ß½hâ™î³Ã<ß«÷ï?KßäÉt’k®"+)iß¹“Þ¿¾¡ÁKŸõ߯1ôÚÖÐw™}"ßtXþl”ÆO—ÖùŽïL/Ìßó˾ 0îï‹Xb+ÅMàMøRá±zêæm뢶m l³r±×c<~ºÊw{X<÷Y‹¸êɦ_÷½#7—WÇ>€Ô-K[M¥º¢¼¢¬D”õo¾×dd’v³‚èºLùgäãaX­½{úÐñ¸jZffu/‹ ;ñ Ê’Î… ?²x/!*k»âð£Ü”ÉMÜö¯ûçw®Ýó"?òä~uß®&?®›Õ¿D½O£øGÊM;z]zw$[„`iº»5ºB§¸íöÜr¢øIxhȵA·”e‰J†Š{Ø‘}ƒµ:ýÇ®¤W/2UÕ5›«È**=‹‹#ÊÊ8,Ö?šQÓ©ÏŠ*}9Û/½£>d,™g±~;¯¯SOIÁDûeä²ÊÛ'=÷§˜Š ¶¬¶e䓹ö:uu“Ò4Ú´‚^,¡¤×èB uEä´Xü‰R]¯‘!Dá˰¿®üýØÅ.,¢œ®9~ZÒyc¹f8,óiZ±¦¦*öA¤>hqª:]Ÿ§>Ô5±ü78ê®Sþü,´ÎöãgÚ7Šm,ÃÞ†?1³‚í´Õ¶ïwñ6üçLâ¦3?}= ]WŸµ®>ÍP Ad¤>r˜2µ‹l` “Pam-öŸ[33 4ûÔsÚþÿ¼MÃrˆ-wøÄKÔfª5i¢”¢gà(Ïæ~ב$õÑ£·NÇc@ê€×϶wØž=ÿ.õµkyiI J*r²RÍØ¦µ¥SØÞƒ_“úÚ™»÷„õôD™Ò(R´8ey9%Uͧ÷ïh›X ‚ˆ½~uâÄIÍÛl'yu É›7Kü^GýRd¦7J€ÔßÈèaƒV¯þ£‹®¾³£g’Ç·.w5认 ×ì-»¹z­Y»ÚÈHôkÎîkN,ëÝ[O\\»R|#tmûˆÈ3‡ì=&wä:g§¿|‘éë3½%§ÑE=FÚïÞsuÆôvXo ^êý©¾)c}Ýü7ù7Âÿîå:²cV º8ïzd¸¿¿_Ë-BKÛ¦çÛ¼°°OOÙŽYäŒ éK—SgúÌÀÔ߃mïÊʪ»‘'{: éhÛN–_8ºßÏ×A§·è‚,,†UUíûûð‹‘îB&¯reþþû±¿ß\ª0¾± õÀwâæØÿbÔõèã{mÝ;Ð%õ+ßd_=sÌßÏ)*ú gk;îîÝãÛ¶?š2Y†Bê E~ôHêÂ…¿yÂ4:v4¤>øžízwQQ>¾g£Ã˜©4ºH»ßÞÜG±é)æÓc{ötWVÔúc͉Y>ò Ñöü._~õꥯïì_H}Ð*èiiøùÎÚüWˆ®©•ªžQ{ÝL¡Úš+Ç ŒŒfx{}û¥«t1ò÷ÕÙºm½……hÏž¬öZäªJñ¿¶¤9Ø[÷ïo= ©ZQc®¿odÔõ+ï´sŸÐþNÄ*yñäæÕKÞÓ¦Iˆ·ÄE£‹Î˜t÷î©M›§zÉÒéí­ÓïÎñ;w3§ÿäOaaŸ@ê€ÖÈÁ®wß^–»ÂPh4sûAíã$´Ê7Ù—Oq:lÞÜÙ­a}zöÜÃÌõð‘ÍNÅèÑ’BBí¡ÈÉIbgŸzNèma1ûR´îaa¯<+*+×®]+L8þ§¶›ýjŠó.;àä2((  U­…J5jVM5ûäÉm))¯~þY‘J¥´Ñ"?K“=ôwòÏáóçÅî€Ôm†(ƒ±pÁ‚êêê½‡Ž”•—[ t§‹ˆ¶¡õ/Í~u1ÜmȰ֖÷Þû§ÑÝÝgpjjNŸÞž—ÿfÌ(iq júrçŽèµ¨§ãÆõŽ]©Ú$öÃxÞ{q Ég´º›w5ïݪ׸ªøÚé£,1ñÃÜmÌÚD‘©ÂÂC‡zsGž=»²õl÷­¹5'Gîð¡d--'§¡¢ØMú =032à555®Fß¿{[½›‘®¹0ÖJVý6÷æåsBTaWg_Ÿém´È=æÎíAr8wï†_½v_¯«˜ƒ“Él%ñú(YüÜ…'ŠŠÎN=ýüݱS õ@{üÔvqèǸãÉOž^>s¹²¼\ǤG—®ÆßþšŸœÒ7É÷n=–ªÓ­»ë{‹6ö>@¡R-,sîxNnò±£s_•Xö”´´¢‹ˆ|ã(ô,]2&æù˼2k+=+Ë!Ý p¿u¤>è0 tµ¹ƒ`<ýyvÌÕ›YiOTT•4u4ºÒhÍ|·w AT¾zñ4%=å¡0fØÝØÆº§¹öˆö]de%ƒñž‚ñ‚·Ï/_¾˜˜ôBV†¡§Ç4êNmö“kkEž¦Ò>,HIy£ $ÖÃÌÈ@¿Ÿ†&ïv¤>èè4»¨hŽ~À**+=IKMK™ý¼¸ð­¤´œ¸ŒŒ”ŒS\‚).%,ʤ‰0?8:”$k9ìÊZvEyiqEQQqIAÑë×o^sªÙrŠ*]ÔÔôõtTõÔ-õÔ ·³ÈÒ2]œtv®¯‡“••ðôéìç¯r²K˜Lj§N¢òò I aia1!Ê"™¢µBB¤°0Y[+Tê©.+ª¨¤ÖV½}ËÎË+}óš-!IëÜYF]]CGÛ\·«¬nW¼£ú>K”Áœø™yª¹jx(BTaªÆ Rq_ïB¥ªªšr‡¦ÌL¥ò:Áâ×XM õ@êha4>Ô©©ú©ú©ú©©ú©ú©ú©©ú©ú©ú©©ú©ú©ú©©ú©ú©úú©ú©ú©úú©ú©ú©úú©ú©ú©úú©ú©ú©©%@ê¤>@ê¤>@ê¤>¤>@ê¤>@ê¤>@ê¤>¤>@ê¤>@ê¤>@ê¤>¤>@ê¤>@ê¤>@êøÿFrÿiMAÁ¾IEND®B`‚nova-13.1.4/doc/source/images/create_vm_states.svg0000664000567000056710000002420413064447140023274 0ustar 
jenkinsjenkins00000000000000
blockdiag
seqdiag {
   edge_length = 250;
   span_height = 40;
   node_width=200;
   default_note_color = lightblue;

   // Use note (put note on rightside)
   api [label="Compute.api"];
   manager [label="Compute.manager"];

   api -> manager [label = "create_db_entry_for_new_instance",
                   note = "VM: Building Task: Scheduling Power: No State"];
   manager -> manager [label="_start_building", note ="VM: Building Task: None"];
   manager -> manager [label="_allocate_network", note ="VM: Building Task: Networking"];
   manager -> manager [label="_prep_block_device", note ="VM: Building Task: Block_Device_Mapping"];
   manager -> manager [label="_spawn", note ="VM: Building Task: Spawning"];
   api <-- manager [note ="VM: Active Task: None"];
}
Compute.api Compute.manager VM: Building Task: Scheduling Power: No State VM: Building Task: None VM: Building Task: Networking VM: Building Task: Block_Device_Mapping VM: Building Task: Spawning VM: Active Task: None create_db_entry_for_new_instance _start_building _allocate_network _prep_block_device _spawn
nova-13.1.4/doc/source/images/filteringWorkflow2.png0000664000567000056710000022303013064447140023527 0ustar jenkinsjenkins00000000000000[binary PNG image data: not recoverable as text]
!aw:qy½Ë#šr™ÂL²Ç#f|b‚‹W®`w») [ò d373K®XÄå|ÙÎr9Íùóg)&0 ‰ééÕSÂ¥’`bbPh›Íx$óV¤Ð a¢:Äjk—i±å#½0œ˜ý“KDžd¡_îºÂ\>¢iô^²,“L§ 6_€hØÎp×eÒëjñØSâåÜ<NœÇWSƒ®+³9„I¡GVȲLÕº]¼Ó¶™Ää]'óÅ{àû‹×—˜¸kp‚¥‰@ÒâÖ3ïÇšñ¹×ò/ c1|á°UˆßPèÉ…$6Uy¬uÝ4KØí³lØ@UWWFenNef&Ýn<²ÏX¡ÏÐ%EÁáv-ߦ#ËÕY)—WôŒkDåšôÜ>UA±¸¸”â°nûvþð‡#|ôžÌ† mؤ×Î¥w¤À¾†vjk9uõ mU¸ŒY†g¨n}93͉C'ñÔ·S_¡º6Fÿè(eÓDU$’‰LÑvËçš,L q­ë*“S ÐDkhîìÀç¶Ý×ò¸üü,¦Ý‡Ã¦aqoÄç¦Éy*jïºíèW—ºÀîrâô¸­Bü†íMÓuJ…üc¿EŸÏ@]e«IMÓxäŸa-Àµxò»o’Bý¶½¼¢Ú9{ò~sI³ãW±÷íWi©Rðí']úŒÓ½‡@ÁS»‘»6âv™øÝ ]Ç?ãʪÙè|v±h”tk s—G(fÒ7‚ÚtÏ)>øÝ§î bU¤r–Ë?¤§gˆ×Þ} [!¦€[¶¼¼þ÷BˆÅ‘ýÒ뢔ãÌG£Ôobë¶6ëfÞ#W¯]峩3¼¾öy¶6¬G±æXXXB·xL½Òr‘d"A:“YÅíàõ¸å{ëÊŠ¦-ÏR·v …B $ÝáD×T$ *ž{û»ä²y„$csºÐ”ÅM(6¼ð:-Ïd)&²ªáp:QdXûükÆndyŒü<'¿ø‚rE o¼û:>’0™ê»À¹ £ä29Êr–®Ó§›˜EH:á†ÖoîÀ¦Àtÿ®\¹F&[Âî вi3Òt§ŽCŸÈâö»hi¨´*Å=PËSz‰÷&ÉrìnÝŠ®X™ KèËJ1=éO?¡«gˆRÙDRT›‡ö{ؾcºroR—$ ÝáBwÜõ§(š ·ïÎÍ$EÅé¹sÒ›¬Úpù¼äÒ@Ÿ`dxußÞFØçXyK ‘æì«é@Õ$.üŽ^Nбe#z~–sŸ¾OQ²±­ÅÅÑ> l£½!ÊD/]]¬kp`s» D* †ü÷”6~YîSÊ‚ŒËà¼2Jnð÷d 9öv<ƒÛæ´˜……%t‹e Äå>ý€3—§Ù¾ÿMZ›«=Ç?çâ‰ÄêjXSå#37C"‘DH ¾pŸÇ$QÌa”ŠËxÁPÌç) ÀíSP%IÆæ°SLMÓ{¥Hç«ì~~+Š‘!9ÖGOuk)fsÈQþªzÖ´¶RBÇ£gðºDë‰=˜FyåÞS!JNÐ?5„±LR¿6; (Ú]i~:ñÉR†ý{¹üÖdE ‹§IèB˜ÌOŒQÐb»À ü‰ bz¶ÈдAS³»úäÄ÷Õ/t#Ç™ßcÖÝÎ[oíîB0;p™÷w„—~ôiŽºï¹± Ÿ;IR ²vC+·Þw³œ§çØAN=Æt¹‚¿üŸÿ†¨Gçi#¿0Ï|Ffm}ýÒÎn‹(º“š5õù.>BÞÛÂÛï¼€^œáÿøsΟí"²³†®s—ptîGȦS” PD–ÓáiÜÄÚöZd 2sóT¸ÜD"‘ûºN[®@R˜ =TI,ê ÷Â9:š#x‹ÏjÓSœ8z…êÖFì•ìÂbC˜e2™,º­ Y2ñÕvòzË&2sÓ\<ü §|Auô…¥:³*Z%a÷¥™óÏ(0Êðë¹Ã¤.ey§åE+ë¾RꢘåÒ¡¥‰ï|?vUba¬›ßü÷_Ûõ6o½ù 0uõïýû^ú«¿¦¥&p—8RfôZ#©áÈlÚ]$XJsê“'ØÍŸ½¾Uº¹_B>1Îų}4íØAÐsû ³”ãòáÏ9uäiÇþâ?ý%!çýÏ0Ëyº~Ω£Ç™•üåù·ŽYLsòƒ÷¹Ø3†¢È õ[ŸãÅ·£­D§›&_ü>Á5ÅÎùënm±¡MögøÉOx÷ïbl«Wï1¶ zO¥ˆ »vºÐîR­Š©"ÿúÓI>‚ÿõ«fmÄú²R.(–/wÃÈçr‹‡n A:1ÅÔä,2¾p%>ä¥ÔïÔØ8éLÕî"ZS”çÜÑc$õ*¼Ñ ê¢Å€"™ÉQ&&æðWÖ07˜~zs?â«FÏ‹k¼K…,Ó“óD÷5-¦ÙE%ÕU!.MÅÁ¹Žš5µ\ºx”#óuÔ56Ñ´¶¯š"—Nâuú‰Ô. }NÕ¨ †h¨¯¿¯ËŸ˜àòÈ0 ¡:ClÙý ï¿÷¿ÿUŽöö5(å ×.œ#)GèØSIcsg/ærÄŽšgh:OÓÞ&ʉ1Ž~rO};uU>TMCÕ@’UlšÄÄÐ5â-ÕDü+/ø È0,ðûä)’]¾“‘õ w•º¤;ˆÆÂœ?ÞÏ\ºD¥Ocvx|1M|tt~;>›Éä@? 
Â璉Y`rK2.4BÕ:µêmó^„irêó$W‡Êh†Ì“ÖOjž¡ £L¹T¢´tîFù–£çøè½”47šd’Í™lzù-6µ‡8÷áo9ß7‡Ûe£˜IáªßÄ–fã£CäÜ2ÓS³T…h 3RËöW£$z38ÔõÔÆ\»Ï‹×a068Ìú–Š£³\d¬ÝmgiÛ–ÛÓ’¦‰¤¹ÙþÆw©êgx`€ž£qõê(¯¿±í‘^³$ÉÔmyžo¹ü\Í^!Ý“åGH¬kh¿Cê’¤]Óˆöy7ñÉQ——‘á "mÛ)ÌM3;—Æ4Œnx5?ÁÇïýÙ¬„¦²Ù2k÷½ÎŽ µô9Ĺ骣[8ú»ßÐÏàõûK³ŒÌÚx÷¯ÞE`2pþzÊÇüÜé¢}o¾Êl|˜ñ‰Iª&â¤ÛšqhößÃ[Ã3¯Õ0qá Æf'îúu‹éYN}ò)ÃÓÉÛ Á_ÕÂî};ñºôïçŒÖ³ýÕ³=Çí½ñÛîpϽý.ÁHU‘ðúÜ(Ê ¬ð³MCP, ŠKi°bIÜèM÷eøÙ/æÈ© vYL ö¼bÿÃïÍòñÅ~D!càªwñÊ•þ¡<3š`t¢HSµŠ]¹ÙYšìÍpìJ‘M»=œ>}âÊ⩺0Ë\;ùÿï¹Ñ“-fÌfÀ4òt?΂­ŽïýÅ›¸å,ŸÿË?rñôycÛÀÛø<¯½¸#5ÍÈTHCµ 5”+·±¡³ íFÎ}q¦µGÕxÚ7÷´ùb´¯kæÈ©Ï9]ᢳ½Å,põäçœ<=ÊÎ7_#ñï$½±­0ÃøD‚Š5›¡ëR?±ŽNv4¶ñÊ|xpœL¾€, òù<%ÃÄöžýI²Juûb-0—֚ˊ‚,-.Ó=av¼ö-Œ²’„¢,>»“ißõ"-;ÊKËÐeuqäÔ±÷UZvHŠ ÂÄâáI]ÕT Ó \.i[à›¿ä ÕñÉŒ NÐZa05•¦y÷3ÄO}Âðè ÊLΕX··šÑó'œ…×ð]*}2gÿø¯\>~ŠŽÖÂ,Q*ÌôÒÛ?Í®ïÿ˜Î7§ßûgºûf½hqVÔ²óoã.ÆùÕ?üŒñYƒ–ŽV"ý°mÏN*<¶ÛGغ¦ñ§6°V^jÛÖXs{¦Qµûq:´Ûßo)Í~y$¯;ˆV93’“ýœ:ÙGUÛ*ö•Û\>²Àÿ>•çú´ƒB¦ÌhR “Ó“Äÿå?UÒ¿ûÙ$‡§ÙÜè£ïjo£—¿íÅL—è3¨iÒik¶ xػՉýzl‚bºÄ'Rø]ìl—9sÆúãi÷²JåšV¶=·ÛÒäÙË$O÷c–óLOì܆ÏeC–Tªjb\½8IYwÓÐÖʱ3‡ùíä5ªë×вaº¶¸iˆ,ËÖ_Uæª/¼J¦øGNÿá_9÷±Ž¢ÈHªƒÖ]ÏÑÚÚHHìbîÀ ~ÿ?F‘D ÃYÍ®­ë°É9¦ú.qþü9ì6r>C]Çz‚‘¨ŸóÇàó9Ù¾¥õÔ%UCù ‹È²‚¬+w¼.Iª¬ß¥þ)hK¿_.[B8‘* .¶äëx»ñyÖ7®E–îÞ5§—šº(û™¬Ê’njP†ô÷0I™‚ :ê¦ïô(Ó“óœúè=TÒÓS,äó¤ 囩ë¹yÊ6/•U!4»FCkžK ‹?Vl„«zìH.·J1_@öÉ‹uG¹¿­UeY£²©åι’|Û•¯ ÉÜp?>DÉÛÀó/<³Ø1^¡“<$ êšìÝ‹sÉfSýYæ>Ïb–MƆK„[]„½2ª ktV$¯*´¯sòï‡Rüß“yššílÚêÆa“Pddn›˜(„àôÁ$#Y™ïìq!Me !žœÓàžš”»'ZCëºõ‹3…`RÍpâì²¼xdéõ*mšKÏ_$޽FeÛ8£ƒƒô_>ÅÕ®a¾õƒW¬€úFé•ìýöX?9ÉüBIÕñ‡#dI¢²c;oD˜™™G(Áh ¯Û„—ç¿ó=¦â“ŠºÓK¤*ŠMWxæÍïP;>‹3\…ju¦žZ™Ç2nž£ƒ×÷ÐÙÐö•2_tžš¦NvŸ§û|[ ’€ßƒÔXùƒôdK¸+ëx(ŠB0VCÇ–­ØU !¶€ê Âccò+%)n¢„rcÏÒ‡è ©N~ò)#Só·u"ýÕmì~a>××O¼B¸È£G[yiÿs–¶3^És6}1[¶ºpk‹±}ÌaòÁ¡ìRGh1)v3¶ ÌÅÃغ?H]g‘«½9.MóßÏåùÛÿ)t× W˜/röTšÁ9™÷~9M!Y¦»;Çß›ÇûŽŸ¦˜úD<¸xêס˪hmŒ³ýÌ̵ã‘2Œ MŠu¢—ç¹tº _Ckwì¦Â+óÛß#“Í#Ké\†|¡„Ë®Ý<ÿË,‘Ï)ä‹a’ÏæÈ«›]*×Ì*šŠšz*jîÖ»–q‡¢¸CÑ;F»6·ÚfßãðGhòGÚõI@vaé‘å)£l ¬Ø´»„&)øM'’¹<»µ•…AR¹¹¿¸,$ªܼbÛÌ«MÏþÉî·Þdõ¼æg\î™cí ß[œ”VÕ€-yˆ+S&_݇Óf#\[…25…áºÂÁĵnÒÂ…|Ë›y>”Âã#“xëœô÷ Ê”¿T«¾| Â,’Éd1½öÛ–» £D>_ _("Lƒ\.G^Øl7O[ÔÝ~š6l&Vº½î(6/.Çí÷B%ò…ë1ȸƒ¤B‚£ŸÂð5°kïòâÏt»Õ¸²VVeêÖè\éË16ã¢B3éî-©uà4Ëû"ƒ¯ÁÉŽ=>jƒ2ÿçÏ’,¤MdI"›5ÈäM4§Œ„„ͧóÚ"<»Tç²LÌt¬s *OÌ,„§BèšÝ]Wï‰ÓåBVm´ïz–ø>ã“ù%²$r[wmÁi×ÈÍŽráâ%tMÅ,©é\O$&U]ÉÕ“'8 °k÷Fœº‚&½8|à8‰Ù)fâ |ôóŸkîdÏþçð;< Ó´6Óxˆ„Ãa6¶w`˜Ë'Y»®£®ÀC…I¦Å_Ç÷“Ïbä—§¼úFùƒï29§fÊÔÏùyͳ—Ûž¥êkÖ ß*X‡·‚ꚣ=jk£È€Ó&q1>bR×P‰$Ë4lz†Öñ9ü»_¡È`˜Íϼˆ$¬9p84|5­´5_áôMo ˆVNbsëH€fw¢k7ï­îp¡©2._·”äЇP_{™º˜oqd, F»NsøóSÌÍN2=“ã?ûÿ¨éØÄžvã±)KYXÓ×?bÂd¼÷‡œd.1ÅÌD’ÿÇO©jé¤1ªÐ×Õ‹ÎóþÏú³hÞ0ϼò2 UÁ¯KŠ<‘è—ò剂¢Êl{ÞÇÀôÿô“)I`J*/¿âÅïÉÌ–øüä4º&#Ê&]ÔWkäêuŽ~œâ½O4ÞzÙƒÏ.!)2­Î¥1£›ºT ­ÍËöàñX0 Iz0G¬z¡KŠƒ-/‹’æÆ¦Ü¸ã„×ñÆ÷c„CN4¥WPIbfO(¼”úìxãš§§ÉÊ(6'¡H›®°n߄ۧQ=¡KP$I¦¢¶‘göûoÛSµ¹ñؼ¨Kù<.݆ßç{¢¥^Èåo{ù¨)>àéPªªR_S³üõrvÊ$I¢¹®‘Æš†eûÌÏ|Ás—ÑË íóaÞ íboû3÷¼Kœ¤{xæí?§eÞ ².¼ø·6?{¾ýC:3‚ªšö@Ïçû$¦g( ìÞ¡E†¶/Í;q¸ì´mÛCmYÆév“=ËøÂ(š+À¶ýoah¡ÅçÚö»_ÙÁçVyûo~Lº‘ ÷-eªiha·½â–ˆ!¡9=¸tù>î‘L¸¶™/oAv7!¿ƒwÿ¾ó¶ßW Gü_#A!•¦2Ay’:¢²ÌÞ7Cl“\êÍØivòãkĪšƒ¿úød‰ Ÿ‚„àåwC¬Ÿ(‘Î 4›L4¦á²K<óJÊvªGÅc»{1ÐèäÏÿ\¥:ôpdžÍê#hx -¯~¡K2þhÕ£ ›‹êú›»xÙÝ~ªÜþ;nœjs©¹s³æôRÓxçþàº3@]sà¡R>MÀ¦µk©ŒFŸØòx<$æçY®C€ÓáxÀ:be<–3¨K’„³¨QŸ ñÝØ>v¶oÅcsÝ×û¸ƒ1ÜÁÛ_óFªð~©ÍkvÑÚ;?Ãáá ÉIºOa^ø©­ö3|ùuk xí8õÚ[C0Vwã_Áª:î6¶¹CÔ5‡Þ¨ÕõÕ1èn±®Kû«^Ï'STClZ¿í :ÓT’$B•6¾\rª®ÐÔ|³Ž:<*k<ê]b»BMÃuYs¨4·ýéï©è2¾:àºÌ³Ù546¾I,Öù@ñÈÚË}PÌç± Ø´¶“Öææ'V@²,Ó¹v-æ2ϘµŽÔ\½h†ÌútßjÝÇ3mµTòH IDAT›±)×EÝfã®t]êev*AEë:¶l½¹YÌ*áV™oß¼™` `u~jùÞ.óææíÈòƒ)Ùú ¹'_æ·JÝÒ«Å⮪Ž?ÙXß"?é^I’©hX˳õ7F{’$±š\w]æ±@m›6Y2_™?ø+KèOºÌl^!2·°xØ4.=îzÒêþ¢ÀWg{¼)óÛ7o& Z±çÈ<“YCSÓÓ¹%ô'\æ+!Ínañ¨Åi±¼äS)*ý¶m²dþ(¸>2Ø2_ÑB·ô&WYÿR¾€ÓfcSg'­M–Ì-,,–IæÉÕá0Û7m¢"z¬±gµ…öë#sIZL³·´ì@’®‚W¤ÐYB6L&®õ¯:Ù y9yÊÊ$Ѩ´ªbûøxžry»Ý@–,»¹*„¾(óa.- jÑW¡Ìãƒ=äL‡ýÉŸ (©6$ݱ,Ÿ¥òËw¯MS0<:FÉX>«kŠD]M²µgýÉüÂÕA®Œ¦QµhNkÉ7‘ùdÿfff1aæ“ÌÏ19yˆhtuʼ«kIJàó=øcÔ/ô#ó¡yÔ@ݪ–96˜+o]­0 
[binary image data omitted]
nova-13.1.4/doc/source/images/Nova_spec_process.svg0000664000567000056710000013200113064447140023412 0ustar jenkinsjenkins00000000000000
[SVG flowchart of the Nova spec/blueprint process. Node labels, in order of appearance: launchpad; create a bug; create a blueprint; End states; out of scope; code merged; bug fix?; idea; REST API change?; submit spec for review; a feature?; spec merged; blueprint approved for release; spec required?; add link on nova meeting agenda; blueprint hit by feature freeze; re-submit for next release; blueprint unapproved; apply procedural -2; upload code for review; remove procedural -2; review blueprint in nova meeting; no; yes]
nova-13.1.4/doc/source/images/PowerStates2.png0000664000567000056710000076145013064447140022306 0ustar jenkinsjenkins00000000000000
[binary PNG image data omitted]
O__ŸnC0;;̤“'zzz7oÞÈ õòò›|ôÉ‘#G2ß=JÁÐÐÆ‘tm³D‹ŠøöœT(þàÁ¤hÍú÷'V¯HA k×®ÔÜ©¤Eenp¾"`ÌKÿéããC枃简€ž†7öèу'Dà½üq9A*ö^þóæÍ˜¦ø›2ÅuÕªqnÅä ÷S©}¦Š½±’‡áLÊPß®ø ÝÿàäÆÚã6ß¹~ý)*â”3—Õ¼5BBÀf³š ŠJ^çÀü–Ì«a®¡1Ç çÙ¿_›ý©úpVí¤i?ñd¼þ¤ 7ëð„žÊÒÄûßüÇq`½ÃÙcþb&OüÒ¥ánPÛô"¼«ýL@Õ-±ïJ¤\¹k÷öÆ^SB¹Ÿ:~THuô霧òÜ¥®®þíÛ7eÇb29ÀŽJâéÊ¡þISµ¤~ëDc̘1§Nzýúµ÷JjBQÒ,—ß$òÉJ¡uëÖÒåxýúuggiö§_»vMºsæ_ðËå÷éÜ—þþý[ºoݺ%ÝFFFÒmcØÎ—×öS:ŸžRû=Eä~3ÍâtSÿ¸þRê¨UKâ9ŠãÇ‹vÓ"°i‘Ô¤œ‚?F³¢¤ä!©–{þ@;‘_ÉÅÚ¿>}út=¤ËˆrÉ(–6þÅÚŠöS(0y [>xð€>1bD«V­x"y¹¹¹Ò­iòV®\éíí-ðjÆ ±«±ä©©©egg+ø+:y:::Ïž=ããååõøñãcÇŽQu-)èäuéÒåÒ¥KT‹ÇŸ M³fÍÂÃÃyÂeH^ÅìÿxÈÛ½{·­­­ººzhh¨°ºÆÍ#D ïÞ££}ûö”çFDúޏ|ùrÇŽGŽéë+Ø9ö ©h?‹ hH©s;;»¸¸8aÍ#ý'ö;Lµu<žZð~Z©Ç… þ¤„0òø•x ÏÃÃjë@dy¼>*Æ¥ gÕ¨À…††žþX³f ý*Ež.Ñüɘ<úŽÎÊ•+#sWeÊË¿>~Çä¹»»¯]»–áÆAEûY†€3JêhI†mW™‚0¯ÿ:@Ï>Þ2Hî£E1ÿ‰J‹<Œ(;µ+É(Œð|À†Ÿ›fm¥†Np‹wïªm®h?Ë(ã² ;ÑœB ©týŠþO‚Í÷ŸElVŽÚùÙÑAb[„ÃÍ»ógÏŸÿnÖl²ÔÅÐ×t…"´A¨N¿‘$»Å%L0©òw.Æ« »­©©«.ÿQþV¯Y8Ù£>&‡ÚE|û–¯®>‘Éír?*«ÆaòÍšÁÀïØr•¢0 —Ûh´ŸgÖž¾­—Y‡&ÚÄâ‰9Ni˜µ8yíO¸ÈÕª4þ¾'°Ô­xÿcRî)-;ض}1Ç®®®DÒ ž OÈ•)(ì;’»œÀÑÍ… ´hviŽšM`62ôdÆŸ*wê(%ØÆØ˜{e5…^ïø’•{}{ >N1‡Y 2çngA¤k(•rŸ˨bZíµ9òBB˜…¥¨‡ Œ=yQ·#ͽÖï y¹uTjüï\Få©ã—Eö9ªìxb„CØLßü´Ÿ&3OqȾ ÷ÕvUÖ Ò® &˜¼´õlÂ5ž‡Îb¹­¸ê­O|^ôšç:O:¢`a)àK}Ppˆ­K!©EÈ#&~,T*£‡;†usÞ‹®§¶;BL)¤™†Ð4Iøãó?¦B{eø_"ê\­hä2 ŽYܾ€pÝ:Z¿ œ|à„;‡e8…й}ÕªS¦V³³«¨U~|#]«q zÄŒZ^Þn}¦>|–WUCµ2wÄF'OhŸúã×¥³ÇS3¢}êЉån+˜øG”Ÿÿ±2…zÄ£…Ó3 †¬,n…\Ï«bŽ8/·‡· d‘¢L‘G¨”­t$¿·s?T8¨Ð þÑB$åz¸Ü/o’‘–E-VEb1?Ÿ£¤ÄB4ùí(òág ²áÄ\¨ÛÐ@†Ä ›Û±/®ƒ…˜•Nñ[CýgúIƇjKµÜæð¾2â™õF¡[;ˆŽúú#sƒåŽ?%%7¬€¬\|–ª÷AƒÝXnáäÏØØ?ƒ ½%ij’Ôp*Þ–”ÞL?$¹ü[ÅndކF,TðÍÙ°e¹–Kï™Ýiç½ÐLn{5xð$Ñ7†„¼óõa¢ÌUžO¡,¶ç£“týZÅhH]…Ä×¾¯ïœûBÛ´®ZêäýËóg®W®êܹp!ÓÒ¥f̘Íðæ6­½x‚ÕhÑr„üãßÿìÜyýgÙjI嘋-Kç)ßÊ7”Û Ë72LëÛ7Ο<ñÑêÔ%´ÙÌOL߈ÿù‹øÈÕ«#UÁÃ\&·j×’òva7оW­*ªR…Å<õšˆ¹–«VÍgnŠ-7O‚½6¿sÄG^(Ìž “Û‹™»÷òXl. = 4ü­µ×seü›Íf{]ŒóíZ-Ʋå¾Ó§…TÔö›xbb¯I9 ¨Š% س8œj…6¹ßŒÖ$}õ0Õ€swqé™Ùû3„Í¿%¾wï>ÅÂË—T`þ\¹Öº³fñäS6ƒ¦§Ÿg³­à(YÿWmäN ]t¶åØÕÕõöVÂ;©OS1…À¯ex¬Ð2[Ñ× B^¦}VQ«Eaøê1I!//wغ+±^· pcßWNRÍ ¶(‰H¥¿zTT¨»²o.Pb‹È£Îmõ„co‘OÊÇ_ÂÁõ]ó¦$lŸ»®i-îÍ¿2_ïÔ¬ŠikFä˜ØpÓn^;ýù9v³n$ÁDÑ—&Z9¸^ÜŽ]kÁ- s‹p®*¯'[òvlŽJŸˆ6-FÏ“ØÎ5·1l“7†1ZÒ¸ó0·ÎLk‰²¹˜,’<¢=ç™ û–lMVÚé%02b$ô…-YWæ«u ` ZN¢_õ:Ö“¨ÛggÕë¶PpY…´oLª˜ øíÂU0pCõ3˜¶ŸÔw¢\rUÄ—k±þ’÷Äá¡=ê`'KÂÈS tæ_è6“)h00;Hÿ)lø,YÃáÌßn‹ÈÉíÌsß´iv (é½<™ò˜'þ퉰d篡ċ/fèf"ïÚµ‹¾Ý¹lbüøñÌù“^°”MM—,ššš ù@L&ü§TØÿ Î5jÔ`î¨N~íg‰uÅéÌ#3oÐRRRš5k&u©rrr6lØ $ÕJ–?~Hïß¿e^åõû_—.]¤¾÷àÁƒÝ»wç1ùÃØ)®Ô½ Øõë×gh¸° ñ÷ìÙ3†‘wîÜ)uF¼víÚ`«Ì„cÙ²eRóÇ^^^Ì­N–!þ™óWœÚÉÈÈpuu]´h‘·¿yó¦äʦ««Û¿ÊÎP™à¹±*æ6nëÕ«‡ êàÝëÌ S·n]ìå]*pèСûöícr»ó?¢qöìYh±ED˜6m´RX(-qþ¤³4&”5$ ïõë×7fx#w÷îÝvíÚakKk×®uww_¸˜‹øúõ«†††À»@Wzþü¹£££¤ä-_¾ˆyÿþ½0ò:tèpûöíŽ;JmÊ©üé/” aybÔÐ`"š5ø‡"Òlè¢-Z«È1yGíÛ·/½A™òfÏž%iQ<86hЀ®UzzzbðW¯^òPñìp• þ˜ËÔ8< LÞÈ‘#é­"¦¤ ‘_.qeÊêŽ ž?ÞÊÊ ÿ<~ü8ð7}útÐSzöì äaU˨hX[[Ÿ9s†&ÂH:³^½zyxx ÒtWRRÒ’%K0y€N:•õñlNØUæ  C-@Œ A°`TŒ›DC@êÜÌÌ ªþÀPé¸r<{{ûÎ;ƒ¸ÐUAh}}}DZ½sçΨQ£îß¿o€ºº:–¶½{÷N:>Lï;¥s›]&øòø;תUëË—/Lnf¢Åßß?((ˆJÙ˜uê”ÍfóòZ¶l9aÂê§ŠŠ %7NNN µÀbttôÆëÔ© }ÿþýôéÓ”¥­ùóç7mÚTYYùçÏŸ#FŒ °µµ…KÃHTùžÑk¿~ýŽ9‚Â33$oÍš5ÂL´yÇÅÐ샾ðñãÇx*@àË*7S\ríÿ(òñ•Ò“É-ÐâþŸjÙÂ&ä²³³©iIºXß¼ySOXª@‘Ê Tš4iBÝ{ýúu‡ §NêÝ»÷Ë—/µµµá'G]‚¶qÅŠ˜<##£k×® ì,ð 6Tîù“tÖ"fSésʘ¼Ý»wCˆÉãWvö¾yyy˜HÐ\àˆÉãG $Ž’&¯ø«V­ZVV–D·€Â cI3Âß1’““MLLDO3m ZXXHç–§t!oþ€¼+W®€jÇ\maHt“TK‹ Ìè $ïóçϵk׆¬[m@žèÄ·oߪ¦‚?n³Ã?‰,ËA¯½ ¥. 
‹‰É«Y³&d;wî,›ä•“&M*‰dñàF ýŒ1t'Wœ1sæÌÅ‹ÿ+ú‹¤`H -@eèæ †:L´eþ !yèŸÝ¿9`Àæ>Úu˜‡ññãGéü*øã‚>¤“?öíÛçîî®à¯¼BVäÉ•¿§éT*\ þò楽»[»q‹YÿÂUhÂLwö(!KúŒšÌ0þš›»öÉ$òÛO_?þ©\8{pµe1Ó\½~£¹#óG++ü†¾*Yǯ­g&Ð&7ÒÉCF„=*\¥“':>\¢W4ytãò0[kF•é!ì¶…˜?M€p:y8äòѽ“ƺ”iþDXYç©1¯§ÿ4dWgžò©ë{¶b¹¥A— à`Ú"UÙøÍÃF5ÄÆìÒwXð’¥~>3ŠËðÎ9ù)°§ó¦xí7u¤&8$Ú²¶h¶ç,âC¨¾¦Z¡¹î»÷õÛµ“%ËWÚ8Lv¯R%Þ嚫֬³²-,~=-m†L#¾£ó§¸uïAǶÄ(%xé²ÞNB{2%ee†¹÷vö™üëÕa±&"´Ñ–æÑ&¥ˆÚw?–º¶Kkê…<ŽDÂwûÈŠ6Õóó~Ó r³êrW⊠cwÜ‘vý¨Ÿ"ÈÃX°paÀl¦&•¾ÿȪYƒ0ù–š‘ÅbU“S§)·ÞĨ!QÑí1É=7/OUªÙ×"ü=Þ"Là̪AÖSÀ‰j‡™¼‚¸:ô•æÊ+$ÊlS#›ø_ªoIû ³wü»÷îíÄ|·6µÿ% jH|H}qUm¢ß  Žßœûµ”èSHβ·EFyNr#°’~ß5Ó.ßð=™±f£ŸYOý†]z†Ý2Ð_>œ„ü¸\CÏÏ3`áø™³u¹íºöâÉýÎ'äÜ^Í\-Ú0Ç¢Ï|2ÚW}Me«B§yy‹?,[ƒ´í9º½´ÍÒùx2mÉëAfMëñÌ12ûÆrËÈ_ô1*pêèˆ>©žÍÊ¥dd þL;›:Mpî |D|«ÅÄ´ GcéXr5Û–Uzþ…Mׂ‚ÈdÖ6©Ä-›È@©ÊÂT•ôl¤Uuzíäî«©KÛ¯ÍÝ1l|è æògk¦] ݧ÷™‡fóŒªç|üYEÔ¬Ò̢䥞9¨o=PDüq4òî§\hclÁ¼À""P®üãÝï" ¢Yä=½uE·cç’ü7/î]h6ŽøvƒÉ£¶V2Ô"Œ—išÚÍÅFkÞ´‰ˆŠ;³w“žW‘qÞh‡á"⟈Z¯7½p§ùȽDDþ_Í"?ß¿zÚ ‰®Ø·þ_3Q˜ÝázÞÜ%3CzYŠˆÙB]¦ü­të35â89<˜B’w›]ô™ùh™Ó3 óècÜþÀÙ¤¦mx—2°8ù5«ó¼ÚŸóbìŽi^“Š‚Àø F ‹œýæa5#}zH¯Îí%\kÒ²-_ÿvÔ¬Æ$Í„è­3¼§0Ëý‘ZC=Yòç~Ì;ñ()…çK¥ÈfPwS¬Oc•ì}Ú^];‹hÎß~¦QŸØP|åØ~×ÑÎz¤f("~ÒÓU«ƒåsû6{{z3#Sõx"r-Ñí5Ôç9À’0F³ûÀÑÖ&„Î’ùò‘µ±ž¨ßy®QhK“ã÷Nã¢ç1I`Ìüüü;s¹ÝùöÕ3}}P1È“÷üuá`¨¡˜ÎÀªw#’ÞhF›ZLu¹þpô<Å…©L¯@g†ƒ  ‰«e«öÜQ¬ÞQ“aJJJ…¹ûú”§ùë2…K—.™™™•VîUªT¡/WðÇ…––ÖÛ·o™Äœ={ö¹sçJ«œ²"¯¢ñÇ|¡¿¿¿× Dfff:uüñ¢mÛ¶÷îÝ4hÐÄFöððxôè‘§§gXX˜< yæÌkkë’ÿ•Sy?~<þüöíÛ---›6m*,fbbâ˜1cZµjuøðaj͵| [ò*ZûY¯^½¯_¿FGGcò¨Í<èJÂ×—˜ÍÁ ´cbb† R¢e“í²ÁЬÚÛÛ»ººFDDy¹¹¹ªªª Ž,;vófÂ&¯víÚŸ?–aIðö||, ò*ìøȃ#µ] È;qâÄñãÇW®\9eÊ8âé$ ¯eË–ÐRš>Ñ ‘ÒDsïÞ½!#b(>ÛVÎÐÅ?ĉ —mÛ¶!Úvµ   P8ñ¾ÙU«VÑ÷çyÐx‚p\»vZrwÇS066†Á¿Ù¬}ûö…††ÂP’ ¡ö×ðvÎ’F¹ç”˜R9r$eM>Õ´OŸ>0>‘ôF‰l%”?þ`ÌW½zu)î’.;lßQ:/¥x"MR¸¨Èö¯¥k ™o˜¦£C‡¥2/Q¡ä/00PØOan;xn™7oÞÊ•+yâ0¼—?DàüwA¦ü<·ÿøÁY±b>O555±7 áïÏs¤,Æ4ÿb ÖÌ ru?DwËÊÐE«Øh ï•UŽñ÷¯€&%°Àüü½þ --{ÌØ»¤¿&%Ñ]H_„ŽÂùL¤€0µE¨T“ÀsÉs»1¼ëáÔŠAã(8Ê”a½œûçiˆ)-,‰öûn§­ÿä¸ü@Ô´AÀf¡“,˜¹”oÈ®5‹ô])Ôaª8þª£§Iþð1ð_%Ï÷zËt›'‰''Çr¤¸W2s¿ÏçUê²$!sVMÉKjˆÝ-âL]È“ XÂrbJ¨9âìâ ñ;<(1ó–Ø.û³¢°ÝîÐû² ¯†Í{@3ž šÜ(a¬ÌËéÀõÄÛ7=ý¨èì†7‰‰¹)ì}ÀÖšõ0~~MÒc-v­¼ª«ðr^£‹”Àaò·Ú“8êÐaÊn'Ço}á"Ù,œ¤[à¥FÞ÷çs—Ź 뀔tžÁ@À@åÁ1-Ì\ÿEnþï¤ß?¿ÇÈ~ò§Ä½„ÿÔ¹N@žç ¿vÿ¾å®Js /|Mi{sudjbQµ®ExÄ Êékm±Qœ_ô'–k$Q'Ù™Õê´B(÷Ù³ç::-EK<ªle’è¡û|€½ÓáÑý{ôz–ñ!Õ·/¯˜w&^þ¨îÍ¡Wækt X{7Ç£=ñ>ú(äY „ªq+N+þü }Q,ÕÊÑÉÌ AA÷¸qãjÕªeee•;vÌš5‹ºª®®>vìØ²0÷ûoÙ_Âäûù 6†·«ã9Ê]m‹- îÁFäl»BÁÂÈC¤Å|B÷–Ê­)’¼#GŽôëǵÁ]ü*ü¯HLžÌ-U(ø“ݺu“Î Ež··7ÿ²`rL€2"ÅÄ"7„þ÷ßÅ)@غ‹ÁNb£%Ù=aÜ`jjJÙ‘5kÖìÒ¥‹ÔY_¹÷˜ y“~#ò'Òmj¡K04ÂÒÝ{,z÷@7„+áSâÝGôëŽ~§³©wü W­¬à}Z˜ú.]¿!;5#ûO>GY‰¥àÒµœô´øe òÃH²ŠIêÂÛ2vE@žûCxªH÷ÿ0œü¯ßÂß¾‹PÄÛ7µIl‡úä‰e6½ˆ=õkÖ|ôð˜%])(.w² GEέŠÅ—zš6}øÍúõýÇ©'üñ ±†÷ñµ]þ!W nG“‡‹õ dcÄçñ®~Rßçɦ<ïÖá g_EÝ?¾«M»ÆµÔ«§¼orÿÂá^N«ÉåUL`<ºç1žhr[‚-Kò„†»J}ûx×ÚÛ¶-wq™&iij43‡c{M•ö®„³o‚m^»ùI0~ ²Bˆkjq¯o¯¡ÁÄ`V㉴\¶¼Ev¸‘Ë…⮊¢ðæ­H½ŽB¯º¸Ô›ýšˆÍ] aå¸Ôú…ÐÏ<s=s‚I}ÍÁ‘³ÿh›uÑ×ì%¢ñäk?ßÅr8çY¬~Π“È}½^•µŠ•}+L­£gêÓL22«œ6 ‹õÜüoÁn9÷y·sÁÏœçîÇ{—^G1êvï1¢ˆ'±Ï?²¯?~Eù#Ääñê/¤×§ª*È#Cbë+âh`IŒø£Èãi!UŠ\åžZÔA¨¯¿žys±¿íËMÇgF t@wŒ#Ü–dý̬V‘}ñì<¤F΀޻—ÞBGE¥*á¼iÄÂ}Ú¢  ¾c¼©†Ç™$?¶$}_=¸!&rDðeÎßͬ ó/¢Q›o‰å‹ôl­pj*úǧÍð÷bß6­¡.dßáBŒÔÕYþþLJ@y‚cZs åÌœ(*¢øÛqáÉ(Ëœ?—‘’i¹ÕS„âÞÚ5í<îàzEÃÊbÍÃ4Óaä(¬)IÙ…SGD˜–ü2aÓbsŠÄMbùòà˜²t®±ßÉÈÈMÎÁ™˜B//¯U«V•w.Ûº{pÜ‘ó„mk‚¯©‰&]zDvoŸGŒ¨-"„ ¾•¤˜.ÄÅ´Ÿ¹9?Y6¡¹ÛÑ}ÂðáQßv[’!´jˆ“S¹¥Ð•RAwlpá¹v÷Žn»ö¢n1ÂO„›ÿ§š’|{$¡¹*,Ä¡Ð=36ê½c‚å´ñ¼§Û¶­€u+Ë–¿Ÿ>ÍU¢7€w6NÙAÎÏò/ê/mÛZ#dŸ®TD)p.ÁÈÛ5"|±«[¡š“e¯V­¶üŸåßÕ?•”ÜŠÕ »±£V­tžB±~¢|CY±ZJMüÊlÉ¥3'ô½é2.*•¸¬RŽˆ±;_ÎÕÅÅðºõ8*ÊO¥K¯è.þã0dP¡½( @YîýrXU­Š¯£®´`M½hÉ᜗îvºA87µî“tæ¶h!¦¥71xùê"™µÁ’ÃÏ“‹RZÎ²Öæ)ù<4/™¥¬Uô!mô²ð5²ê±û¯yÝgî‚óiœöj%¦|²X¬·_²ØDkmYÇ çG©ylj‰ õs¯S£a‘oàçÔ‹2¬Ã˜Èø!\Ó–ééç áL¾~ñâ…9Ƭ2õ6Þ•8ËÕ†8û{ùÎß.TÉ]c9ó؄֔oò+LJôc{Rª‰Ül¤ªVÞ¤>”žt¼Ÿ '^qÚ+ƒ@Z-Š·Æ¯›ÍÔâ®äâ—ClKÙ›×—H|±Fúc£Š^ï@tbÝ`Ü–#.Äþ»€(rtñ+‚.Tزi ‚_57·0Oa*{TwÊæÚse¥güdkVxhÖ­Óóy´ñˆ2Ð,¸Äð™­YûÁKNkíÂpѹôw-0+_©KûJJn¬.û—†zˆ^Ý:ùp“Ñq;/ù¹k2ˆ_áã¤sÎdæYçì¤"ŒÔ©*mŽDjïnFDæ 
ÿÐþ#ÿrT¬Îè4£‘#>ac4,ëu2b…G/ÃÑaamš«”8BR[G9:j°yX†4)2Š^¼ z§ÐAvÊcM†¡ǘ•b ¬ U43iKTÃcáYç¤iê³>TDX8+枊÷±ºK¶¼€RXT´mÓ\›\øWÄÁ9תç´ÕÔ÷©O47Ö&&†?8Xn ‡1š~™›2¬Ã={H<¼Ï®nD{лŸ,ÒyÁfæ«.Is]½% ˆ´j\^{¥Ó¼[—"8˜ÂÖÚª¤éW‹ËækLC†mç#Šûk­tE )[“£—öºÈ„.cæ“•î•x‹û qNR‚F›ú¡­-Ò¾Ö¶°ž½ãñ<.f¬>«³³_=çz~5ÚÚØ´\‡­,Y©ÃŒ”çLdмlßÐÈXe-=î™ù⡦-³Ü,Ï0À|¿ÆÛXãÌÇÖh¼“);`I~Ûàß²¤ª&-’Þ(Kü_÷Ñ;=ï•xi¹‰®Ëùfee¤c.]Ó¤›Ï:3*ôÌu•ù¾;‰Ëð/4^¸|ûÅM³ÕÍGèŒÞñdWóà™À0¢°sŠJóöŸ®x+ubr!ÑW âé3k]'ªS(ÃIae7ž¹¹é)\oˆ³Ò_·m$Ôó{õm¹k¿¶n+¢k޵²åzå|wGñå䑲}dbB¯s=Ð8k»zN©Ï¹Î|y™™ÂŒùF›qý¹'ÅÞÒœ1 é¼`ó5¾n¦=¹ÉUY¡"üHLð¦13‰ÕÚ¢6MM·`c)ž+#ÿ)ÙWK/FF_ãùnüyuuuˆ@ïSQÚCåÈ£(>+¦ìÞúÒÚ@µþ[ÍŠ]Äùó“…Õ‡Üü½6õ”|—ÛéÔ¡µ×¯ž3ÕAu ío¥'Ü™îÐBâÖ™³œ ëë»òós«”ºüªŸå<«Í§wo}Ø3ö¢0㥥i+ò]èj =w5\] mf²Ä“ 5åÜ-Þ·c'´ O§Mw*ýÅÍÙSB}íÜy³™cª¯{1"Ze˜> “¯ —å;Ï0_YZóÚÿw¹¡è„’™Ó¶}7n~Q›jn=ÁïBsÒÈõ£„×7‹¹jª`.UMÄ1»-[óþ(¿n±ï}Œ¤^ @‘LÖeSÒÛùÀÿW.Lj÷ÕVyi æâ¹³n%¼ì÷wÒ†îjvN ò„¸ûŸ'oó÷Ðó{bÄÚÅ—r„B{Ò&íItN¡SÑùZ®è}¥{yûZÌâÚðõí ck×x¶ùôé“ì.ÝŽ¤ÍµåCö;Ĥ•¢¹›ÞÅÙ©sgÍ`ñà%ófG$$±’Ø¡ƒð«¾å² Û'Ôg¿¹Ób®Õá€uk×´xXEñnvÑÞ&9O›Š3KÔsðäígÝAÄU5®WR~Ogõ.™ö¬¬¬nß&&K%€»#HÅqM¤ƒ›cwzz®ôööv#š¥$™ö ­ ãnÃ8YË‹|njâ­H«ï’õSäŽÆû—§NÏÊ‘V¶¶õ\ê2wij´›8©)/*àÖóýž—±hîìÖž5kê¤Kу¸ÔýÏx•4v´n+T×ËDn1_^Ê£éS&²räsó¦_îÔáºI“Yo_0ÛézÌ£jZ\)Ìœ·éˆ17'/1"d¤ hØ1=•æÕ?Ú–³î„•"×ð¡Sw»”¯3ê†Ô6g®¸Wâ7F$v3d]r“és½uÄ€îè<_fpóˆó÷&b/¯Å¤LÐŒõùÍ©èW‚¢þ›Œ5}xJö¹­Xr=Bm$§3gÂ#a5†ŠºªòÎÝ{¬ç¬ä|ùÜ:shýºµí¼ÈÂ9³Nœ;?Ü|çíÏÿðžO·-G…»¸R!ß>Ž^±lIëêðÊåçÃ"ës¾ó±\HÚêjÜ«ÃGY¯Ã?pÇ™úM—ŠÎvz¸¼ljJŽ Ð¶ú³Œ2)ò\/sZ.7“þ™«¯%‚uV++x÷ !âäɺŸ•ÍþÆt'‘®\D7v¯Yõó'"ø;äÐñ¸Ò­†ÍôÖD«›|Ͻ¬Ë–àçɇ!).ÖÈqEyîÐZOV´nn«£Ÿ¦öSâh¾æÒ÷i«ÝV³åRêC8>[wõLkY„ »qf^{}ÆÍ\Æa…Ý»M‘«$D…8¯¨î]ãÞŠê±ÚuUlRFyNY”ùÊÓÃõã'Øúú4›:Ÿ£}ˆ_ ²ýú!ÿؼªAËraS1yRÿýäÎå ¶SË&²+;ØD›–ITu*äþöÿ sþAa>É´P·!S|Ñ=ÛÜfÁ?„7B-L ÷pv"-79Á£•Œ"$ BhhlhàãTÊJBC=?Ò¿Ë–«ÐTßïÈdÊ<ŽòüÌ9³Û5àºêÚ½xN:%Þ 9áéîÚ†­Æ˜ìñÞk1Ë…c¦~J{궪Ui(a!!¾ŸUœìe~ýüOX°µY^—.Zõ䕜Š:Ç 3êÂ)·UÈ?ö5^õÕOŸÄ^‹ªöÚN ¦Îß}þ–àé4D5‰. eÐüûð§{šs >ùÞ¤ ß}9Oa8—>àwÖeº|¦6Ù§©ÉäãQ?ßõÞ'šÇ4þ—˜´ƒ÷†ª×zºyûî7ŸÁ‰øúïEÊŠò|­Oúc:ZïÀÁÃÆ“çr¢É@7‚Wº°³7àºÂe·—·¥órNØÿ³ºñg “„æ,½¥]»’“)/-ê&Ù›6ßm¥„¢©Ã{÷û¶¦6T”ÊÊônCÞðq&£9¦¨ Ç…]Z¾dQkO†–¨ª(çÄs:¼¦5ªg>"¬Do“6lVÔH5_öôöCè¥gáç*j¯?4¯¹Ö%Qâ¢ß´G7l±Åıëã‰Ó~™Hs>ëY‹ÆèªÛ/êWõèp`ðV¯²¦¦Ðu^ðψ@ˆ¡y2ÙÁ““´žn®1v˜Ó¡¿ÒX]VYúeøè6ʈUË—íñò¶èxò¸qÜ{óæÍl¿ìº5žèa€ZM?m’}û/¥1Xånl|WAa‘ÿºu¨Íaì(pוþGŽNœÕ±•ãgEIþ{‹Ö,ùKI¯\¾{—åìX®Ã£†k]»Ùo¨^Ga똭ó3_DyAÜ6 {ÑΠXSΜ˜.÷ÒÙïÔ¼™jÌSÏÀùÌÅSHmåïou±3›¢¢Wâaƒ„™ Ó~ÞBš–-CJË·z@\¸pا—h ÑGgóîSYå²´C;¤uß k¿—˜Œn×èÙÚ5ž^>ûÆ9-é # $ñÑqo#ô0:tþŒPWù.é)[hï·L1ˆ‰K¬ëÞG‚^¶ úb» |ÅÒEû6é°Æò’o¹m¦½æÐÚ5:¿Ð…uøx;‹t¢åÛQR CDÅ»cÿ¹ÿṡtÛT–J ’6˜±'.¸yýÊ!Äå q#é%¢ú¢™ÌPœ}]=T$AÞ'œ»•±ÏkK²]ÿ÷I%ÆehjúD §^QÚ|EG>%m“ιkÏò#^¢_9íó›ÐŸ§ tHwíÞcÕhÙÏb‡ >¾M¶6QÑ?þW-7ºE~—RU~æðF;;-å6íž„”ÑT:óñA¥,¿ðËQ¯1†óz‰ömæ3—ÿ’¢®ˆ‰‰³ð…S½¬_g^SSsààaEiùÍýzþëgÏ’-ךQj‡‰Á@@")*>|ÈKsšIïæ—} ·ÿ«©þãØ§,düXúkÚ«×£ ¿ªLlvæþïëÇÛW.­6_»ÅÁ¡µƒÚ­kWH^Ðé“'ütŒ÷è-Ò¼bÃRîFôèÎo2C¿UÃim¹ê@p V-Ùük8/#%óyŠÅšÕZ#[±§ Á$ú`A¡×þWU¥¤3­ÙUoùQWΛ­ZÝùŽ”ÑRÄD„·8:€´\ºríÃûuí¾2=«ü^øâq—Ïy&³MV6yèÅ–{‘ÖÓŸgß¾÷ðAìpuméaʽB£ý÷ñerJb¬ú)ã´¦h·ó úÂB‚Dqpár؇œ·Š:b+Âh•e¯’²_¥ëO7šoÜ>ó*Ï„ßää†^¾,,.1XY³»Pcsy›‘üà®´ŒÌ$¯ˆTcÓÓ×”}ÏHŽÿðöñ,“¥M˜/‚Uu +zgÄ›1±OãÕ´¥½†)ÿÕ|xùôib¬¦ŽîÔqc§ŽÓl›pÎï·ç¯Ã®†J’—QÖìÆß½‘ã¿åf%?ˆéÛOjžñ µŽ­yHù8–ù?­[^Aaò³4PÁo_ ÿWQNûï?^¾ÎÝ{ Š÷ë7T~¨ÚÐ!jƒÛmbɉã´à¾—–=JN~—ý¶ðs^EY)/_MuUW~Qñ2²£GŽœ m0A›ÝÒyþ¬ÚÉn¨Ÿ?'?MË}ŸSô¥ òþþþæææü=ûˆ‰ËËÉ¡¨,mˆa†í`ÙþRv6µ Iž½Èx™‘™Oýð£¤èh@Àò+ºtíÖKXDªÿÀQ#†ÔTÖÓTnχ—×ô§Œå|¤>}–þñ}Nñ·/UU•(hM®añ~CäåTG W‘1n¯pN¯=oA×ðC¸†ß¼ýRÀp ôé#6PVFCeä=m½ö¹†ä+ÈÓß_@8)5=ãÕëÏyÔÒ’¢Ä„ MM¾Î…zõ‘ìß_ò]Uq¼ª"*B‘òq<â}Dô'Žgó@öà×Ó‹isêè·¢¢S~M–æââ²mÛ66óˆaòðG,ïqsÛ¶u+Ûµ? 
øcÿkx"Û_ÃP'VSÄê°aî_¿Ž I¤| ¨®®æ¬wâöYÖõRTT„)ÑQoo^tƒwDŠ‹‹Q" åC :( %''§ÿþ():eee(ò!”ÊÊÊýû÷{yy¡¤èPôù«¦ò!ˆzèҥ˛7odee9%ÀåååÁÁÁHù:ZugÁ‚(ò!¬‡‡gÞ¼yOž<á”óóóÿþe\‡bÕªU`ôQ: åC XCUUÕ§OŸ8(À=zôÈÏÏGסˆˆˆ D途`™…‚ÏÐÐPcccN °¢¢â–-[\]]Qöu ¢S^^ŽÒ)Ájjjúõë'))innÎÊWYYÙ¹sgp@@R¾QÑA途` oß¾8pàÝ»wutt""" Ø<À§OŸ.--…`¯X±âû÷ï=zô@™ÈÝ”••-Z´ÈÑÑ%R>‚5<{ö *Ô222ùùù`¡Ø_ù|||¾}û–——çççwôèÑÂÂB”‰Ü„„D—.]Pó¤|Ëxþü¹ªªª‘‘Qnn.h‰œœÜ«W¯Ø9ÀàóŠŠŠtuugΜ9vìXXBùÈ­¸»»ÇÇÇ÷ë×%R>‚e¤¦¦ZYYñððÌž={À€÷îÝ;yòä²eËØ6À›6mÚ¶mÈÞ?ÿüãääDX@”\IŽ––Êb¤|+IOOÏôàÁƒþýû‡„„¨©©™šš‚ÿ“’’bÃІ‡‡¯Y³æÀ«W¯®ªª ,((صkz Ä• <Ø××÷ãÇ()ò!¬$''>oܸ²w÷î]ww÷‚ ç'((Èn¡µ´´444”””äçç·—’’òâÅ ÂtíÚå&7Ñ»wï¯_¿‚á322B©”`%C† !”oÇŽPÄ+((<þœŸŸŸ»µœœ0a|ÊÈÈÀ'túÛ·oýúõC΀›€Z\Šƒ ÊÊÊB©”`1cÆŒÁðv.ðÅ ,‹ŠŠ~þüyùòåöööòòòlÔû÷ï?~ˆ ñððÀç›7oAöúôéSPP€2” ëðĉ³(5ò!¬‡x”4~üxø>|8Q×Ù;EÏùóçÃÃÃíììØ!¨†††%%%°0b}’n &¶jÕª´´4½Áƒ¿~ýå)§Ë^aaa^^^ii),£Aʇ@°(b¦N ¦¦¦Ä–ôôô/^ 6l÷îÝk×®=tè!;( F "–ÕÕÕásÖ¬Y Ì ,Ù300ˆˆˆ€@Љ‰¡!=9È>'''¸&«ªª Zãëë‹Ò)ÁzŽ9B 5wîÜ”””‘#GJII 2$33S@@àÀ'N¼}û6I ~°ÐŽ“ÁÊÈȼ}û=z¤¦¦Fl´°° f®Ù¾};h6xV=âQ-Ê\ÎòqåÊ•çΫ¬¬Ô×ׇ‹ ¥ R>¢U¸té9âÞ½{Ïž= {Ó¦M»~ý://oddäСC_¾| â÷àÁ°YIIIíbøœ‰e©|ä„mªªª:::÷ï߇e=qqñ¼¼<”¿Aii©´´ô—/_`>çÌ™’)ÑZ06 ¹zõ*¹ìêêJXÀÎ;ƒì 3dDªäëׯoËpŠˆˆ%#pïÞ½þù‡Xöóó ™={6†·!^Á2ÈÞ²eË/^ žå2;£§§·zõj"soÞ¼‰d)Ѻ,_¾œ\¶´´$—Ge``N¡P`dO[[ûСCÇ/LBBâòåËmH(Ÿ>}J®>~ü˜) „òaøkK¢,Ÿ¢uÙ°a#Æ- .|öì¹ ²×«W/rÈDUUU=99¹K—.)))ñóóÍðhmmM–b¬¥nu]]]ÆU==½aƽxñ‚ܲÇX’>zôˆÕ¢ƒÆyi_üüüöïߟ••5kÖ,b‹¿¿BB’=¤|Dn‰Iù$$$˜ŽÙ#-Á«W¯ ¶vðÓ§O°‹hr9þü‚‚òáKعsgff&ÓÆyóæ1mY½zu];+&&RGxAâ=epp0x\¢Ï>¢JÕ—/_nÞ¼iaaAl©¬¬„«ˆJ¥ÖÍ>R>¢µØµkÓ–k×®}7‚ì “ LYYY¨­ÆÇÇ/\¸Ÿ |P÷òòb|ƒøwŒN ضmÛíÛ·A¥ˆzzzÅÅŰ`ooºxþüybt˜æ’’’…#ÓvoooºÇƒŸûøñ#Ó,ngÏž…óhhh0‚µÂ!ÿʆ£rsUUUÆÆÆß¿‡kƒ±Î„ác 8ðÑ£G(•ò!m ˆÊš5kênohæëÇûúú~ûömëÖ­ŒÛwà€ yzz‚_ìÙ³'±},¬]»6$$䨱cÓ§OobØà< ¥ue8sæL½Ê7zôèz­9r$È8¨¨¨Áƒ“Û—âdffÂ.ww÷U«V¡K¢åÔÔÔ,Y²äáÇáááL{¡VYÀØy”hSN:UïvÐMMÍ„„„º»,--+++ë!Å'++ „dïÞ½+W®$wÂÁðɵwîÜ I6Ŭ ”Œ………ëÖ­«w¯œœ\C_Ù›0aB½½ÁÞ½{÷âÅ‹ºc° 2„h¿NJJud4µˆ&ÉkffVPPpᦧšÞNJUUÕÉɉµï€HùˆæµïFŠ¡… 6´«sçÎ { K²²²¶¶¶L{ D<)…¢q›ÌlÂÁð1bÀ¾yófûöíL“¿/X°`Û¶mÌÇÚÙH¼ÀjÖÝ5lØ0=°§©©©ÁÁÁL{Á’bx·E555ðŽçÏŸG­@ÿˆ——dÖâÅ‹}||êlÌÙÙùĉOž<ÉÍÍEÉ…”hgŸxhýúõþuðàAø‹µkè€ÒÒR0a`A„„„©»111[·nýðუ££½½=Ùƒ¢^À,Ö•[F@/_¾`ãÆsçÎõ÷÷gÔZ&Ž9B¨`hh(XÀº^ÁömvªªªV­Zú޵µu¼Z  j)¦®® +pê=2×ÊÊ ìþîÝ»¿ÿŽn4¤|»@£Ñš2š‰´´´©©éÝ»w? ¢¸¸øÝ»wÄL`¶ê=¬gÏž¤Öîܹìxooo>>>âÅ!•J%{2¾%:þüáÇŸ>} Jfcc£  ¦°)q¡š?¾¯¯oã³»ms@à§L™rèСzŸñBp`RÃÁÁáëׯð­º} ¹ ¨7@‚@¦@š»áÔ{dMMÍš5k.]º4oéÒ¥èCʇ@°½{÷nü‰" ôŒ¸4€ŠŠŠ@Ô””ôôôjJ°ÃËWß‚³S§Nu\€C®‚÷‚_éÞ½;üŠ)??C¿ráÂpÊÊÊÄ샔è†%ø­[·@¿eddê=rܸqdÃp¨...éééàr6mÚÔ¹sgν$ 8õ È PtˆÑ_¥‘¯xxx@òB‚ÀÉš)ÁŽ8::6kâ:=pZMôJÿÔÔT oøàìì %cCFaذa`ãHYýþýû–-[Ž=ª­­mmm=iÒ¤ºßŠˆˆkH®VWWŸ8qÜÆ½{÷  Ôf|@ þìåË—·åaü%†?Ü311IKK;uê1ÿm½LÀ!¿}ûö“'OJHH˜™™™››7ÒrµÝÉÏÏ?sæLHHȳgÏ qÀé‚?vÆiä[Gpu„‚ÝyóæŠŠ _³¾¶¦Þ~r@6|otìØ1²3{rr2Fppð«W¯¿Ò£GOb5&&äìH ˜}}}FFFÆow8/ïrƉ‰‰ …`ݲ³³UUU'NœTTT¾ f·)‡ô![~ÚÛÛCYJ ö®‘¯@àwá«_¾|ÀŸ;wÞØØ¼¿|ùrxxxll,,CjÌ™3ÇÐÐЧñïÒh4ˆûñãÇA)! À×îÆA÷R>‚c(..]!š\6 °/>|èß¿NNNs¿ ŽX€rÄ,Ñëׯ‹ŠŠþøÅñ8äjJJÊØ±cAc-©·A)†?º¬Û*çîÝ»ЀN:uéÒÔwòäÉ7pÅ:ã¿xñÂÊÊ >á<˜[XXx'¹%** $è1„¼é¢E‹È™uY8ðÄPÏHJJ‚„ÒÒÒ‚š={ö<œ¦œê àzAõÿûï¿ €¥³ÀA÷R>‚#ÑrrrbœºYâgê?OÓ¿X]]MŒèqþüywww Œ蟣£ãÚµk›xp-?~ü`|„xçÎ .ܼy“I###PĆž16ì(ÎúõëÁ¼‚’e=œüГ'O`yðàÁÊÊÊêê꺺ºL#—ÂÈþjÏŸ?ß¾}{dd$ÈÄ…q\˜†˜„øååË—àM¯_¿þôéÓ‘#G‚Ϙ1ã“~ýú´˜. ¤N\\TB *>Rµ‰I ¿ €¯ƒƒtÇA÷ R>‚ã9wî(Ö_ËIaa!Øðmõ*Æ x…ÿýï/^ÍcÜ%59ÆJ\\è1ÁË—/ß¶m[CãgÖÔÔ€MdR5=Æ-ß¿ AzðàÈ3xÄ)S¦@i.&&FsàÀ ïÌ—™™#ƒSï¸eyyy÷îÝKLL» *U^^ 7ìÄ‹ ϱcÇ222fÍšµnݺ†Z·21tèÐm8Œß½{ œ––•^^ÞÒÒÒ~ýú©¨¨€(Ž7Rt»Ya»råÊ7=z4dÈйùóç5 džp´¤|WñÕ«W™Æqþk¼¼¼@‡zõꟓÔE[[[(µ7lØàìì >ïgÓÒÒ"ÛžTUUíܹl™€€€©©©µµu·n݈] µ Ü<[=Ìp˜¶ƒ¾‚³O0IfPfp¼àft; åC ¸'(‰™óXØ©oß¾bs ‘ƒeWW×@œ¿;-ÙÛ¨¨¨pww?}úô«W¯V¬X&²‰^ª^}ênAòðð9¹víȼ¼<ˆ¢¦¦æøñã夡Ðêà0m1Ø»wo89è8ZsH±ÊÊʲ²2øìRPP€¸€ÈA*))5¥¨´´4)êPómÃB Õ[ˆÂ‡øùù!ð ¸mmm4)R>¢†ãäÉ“,—=ÀJsð÷ïßÿøñ#”¹,ÿ•®]»nÚ´ , h±ÊúãÇGDDHII^ª¡ñÉš£œ`¿¦M›ÆËË v"?  ܤœœXOð£žl°¡°EFF$R{äȑెãüñ§Á€A]¡   ™¬¬,(qZP/Ä‘xö[\\üéÓ'øQ8Œx9iÒ¤ºOzü(ñôÌb—.] 
´p*°¹`궈@ʇ@pÄ첬:arrò®]»®_¿>wî\0–8ä^+++77·°°0r~>V4ãlìL­=1| –sçÎEEEêÌŸ?4Œ|@Ú,RSSA ÅÀ-=~üä­ªªJXX˜huâè訬¬Ü¿òxˆ)H×ÇŸ={Ë ^?~ü€í UMì½@ØPðµ`—¡ö@<5ýï¿ÿ@ÿÀB,@™ µÉF1àóÒÒÒà8ò÷Í›7ÄÐ  š Êð»`™Ú)à4 8 ¨;ü:ñ.³´´tÈ!p°¤ ‹ªènBʇ@°;ÕÕÕ"""P·|ª¹Ã‡ïß¿Œ—­­­©©iÝ™ !Þ!:B¹|ãÆ –Ä }PÜÆ™‹C®‚1U§ ¡¯¯B'2|H8**• FmРA„$€¸‚QS©{~¢õæ¿ÿþ;sæLÆícpê ø³û8 '/^¼Õ”””é‚_úô)Èü¨ŽŽŽ¶¶68œ†"¡=þüåË—!)455A†™G~~¢2 VLh9x;40£ˆ/ˆ:q<ç|œzÏvt4,#xGPwð‹ðu ù€Ð‡”hgÀõêÕ«)]åÂËËk÷îÝ***ð¹§Y_wÄYºt)”³-žÔ”î àº@Û@ØÀ aø\ &&& . Û`ËŽ9réÒ%UUUˆ£L6ObT3œMZZº‘ãcccÃÃÃïÞ½ ÚŠ2qâÄ)S¦=è¡FrûöíèèèÄÄDWWWP‘±cÇNž<”—·þ Dš©Ë9F§{êÔ©˜˜ˆÔÂ… —/_NM)¦ŒÓx¼ ¡@ AÏ Ñ^½z•““#..ÆäŸhDJ<‰ÕÇiè$SB_¿~ ÉNâ˪æT¤|Dc@ÉE¼jîA9V¯^ _„’½¡gšÑ¬'”§à6þâ yyyƒ&Ç[Ò™h¬¾-33ÄT w555pN£p9›ººúñãÇÉSíÙ³,Ôœ9svìØÑ¸Œ1â€SRR_w»nݺ+8°ÒÒR°©S§‚!ÓÆiè$}úôa†” ªªŠèrpïÞ½÷ïßC¼@!„õvÕ§P(LN·¬¬ÌÏÏïèÑ£ Ò‹/¶³³#-]CÈâ4Ò5âãÇ iP¥ç ‡!ÞŽ7nèСäIç&YŠŠ‡3@ý²Ì7¤c÷R>¢EHJJ62‘^½€£Á[»v­››O,RaaáÙ³gÓÒÒàŸ ðCׯ_~(## Ù›7ojiig"ÚŒ0¾Yükà<§qˆUýû÷C9 ¼ÓE9qâDpp0Ô@äjjjôôôÆßòþ‘àØfâ0m/r?B¯nCP~~~Æ* Bqýúõ7­˜¶Fz æææBNAðàÊyùò%ÔEÀ}‚äƒ`ƒÂ„ï¬Ûé´<1|j0ššš†††`‹[ØL ”Ñ¢F£5ñ`(­ ¸177ß½{wkO²³hÑ¢ÊÊJPe0£äFjXXXdddll,˜B(¡L700ÐÐÐ æ^˜={6¦m“tŒc.;;;ïÝ»jûö틈ˆ8räH”Î࢖/_Îä†AQàSIIÉÕÕuúôé­¼ºoÁ_úûû_¼xª 3fffuEòðð¬Ç!V³²²ìíí!. Ïeßt¤¤¤ˆÌª» $ ò¬ð£GxyyÁ#‚Ûƒ<%äqŒoÐB¨‡Ýºu«gÏž3fÌ€Hž”¨Ÿ4QöæÎûýûwœ¯_¿¶Yð ø322êܹ3j`à@$ F8 }åi¶àE>|šñåË—S§NïÉÕøÓãÔÔT¢&ÎâÄlpÈ-)))¼|ù²œœœ……EÝù DÎ;²ú Ÿ` §M›Ö!2dH½s8@ ƒ"Âç›7o@Îá€*ÑÁ߇ñ`pÿPÿ€ŠÑÊ•+Ñ„Hùˆ_ôîÝû2V]] e"”D`Z;<111'Ož„«_¿~àE h#Úîûùù<\¹råg€*?”m“zîîîÞÞÞ p›7o†Ð2zÂÍdggëééõõõmDŠ@öÖ®] G6wD1–0räHbõùóçnnn „p±LÍ/A– êCF,àŽ;êŽÓÔu{Pcƒp‚*ß¼ySLLÌØØxÅŠDËši8䑹¹¹^^^pi)((€s‰î}¤|ˆ `Pþè¡È Z<²ÐÔC‡8qâÇóçχb«n;Ì»wï?g³ÆÂþ À븸¸¨ªª‚˜mÂiä`(…‰q>ìè覰¡‘©! t7nÜç¯om@_dîÞ½ÛÓÓSSS>§-Äð©3ˆèCš@ìêÛÐJP(”Y8Œ¡ÚtìØ1ÐB p{„…•’’bœ¾ 2ÂÉÉéñãÇPáØ¾}û_´êBʇ@p$P_–””ldÔ«™3gBy÷G¥i.§N‚‚’J¥®^½úŸþa|ŸôGˆÉÈìêrçÎÐÐP–§Õ§OŸ–-[Ô®Y&!:x‰‹‹Cù»hÑ¢ºÇ@é ²×ÜY [¢I*±loo)`aaÁ4™&K–,yõêU|||s§od!L½¡>áááqæÌyyyKKK¢Y¬ºº:Ùg´  ¶ƒR‚Š7}’ ¤|G"&&ÖмêFFFL“¾þ5ÿþûï‘#G&Ož í†Ú54(Oûöí[UUÕPÁ åZ³&‹oœ/^Ì™3|ñ<­å'"Ð4¬ÆâÅ‹ë²×¬Gm 9ÑàÓ§O!ð Õ.\`J›pŠ |Ó¦M‹1bD»‡éC¬>þ.B½¥K—îܹ³wïÞ}úô "½ ¹¹9??ÿñãÇÛåm1R>¢)..nhœ”)S¦ìر£…²7ð`•À¢yà°$äà-iÂ’F’¥¥¥“&M‚2ñÚµkPP¶FúgeeA CQ[·Ëì’““cUÍ£5PVV&R.$MMMAAÁ°°°Î;{ÇŽ[‚#"">> µ<00póæÍ[¶l-dWKbªÛ·ow¨±¹‘ò!¸ºšTSS3|øð¿“½={ö@ñå”LË[ƒ=z,Z´èÓ§OLÛ‰iþîœPjkhhìÛ·¯çŸ:t(ÈÞĉ!çRÿ4mÚ´ÜÜ\)))ޏ¢ú÷ïO¼žtuu=~üxzz:1ðw×®]‹ŠŠ233›Ò:©'æÁðî vC† 8|ø0l,//×ÕÕ•––&Ûþ åC 8‰U«V0n)--IÛ3wî\ð>>>ÄjVVVCSá0ÊmkkÛtƒÛö€ì1>ä„Ђdœ\—㵑¸}û6ÄåÙ³g>9‘“““‘‘QãSd°!ópàŽ1bÄ´iÓÜÜܾ|ùRXXÒ÷ws#åC ZÇ“Ê7iÒ¤Æe¯¦¦¦ÿþžžždSo¶ââÅ‹¤ò:ujÇŽŠÅβG²ÇØ«;ºWOœ8dOKK 4.€„pb~EGG›˜˜„‡‡'&&râƒÁ†øüùóŒ3|}}õõõÁÔ´ãPŸ,!** j–BBBpì5e¤|D[púôiBùbbbˆ*™>}:ìå ÙÃða8ˆeeåº{§N$((ȹöˆ˜-ÃÛVäääpÓÕvøðáÁƒŸ¢Ý Æfl òéÓ§… r¢ì+V¬€C)£¢¢Â¸ÝËËËÓÓ“ÓeàÂ… %%%~~~ðÉ1bÊAð|YYY}úôáŽxÁe©¥¥u¯ \ºt‰ã2)‚{011yñâERR¹%>>þîÝ»111#žƒ:88¨ªª2Z[)))®)C544±téÒÀÀ@î»,—-[u/p·Âœ*™ƒˆ‹‹Û¿¿±±1øZ¤|D;@£Ñ¦OŸ>tèPòuQnnnrrrÝ~œEXX˜¾¾>cã@ˆ&Ä‹›òîÖ­[•••`Ö¹Rù0ü¡.ÔTòóó###›2"§²¶o÷îÝ÷")‚K¸}ûö¤I“ÈÁ-³²²®_¿þÓ‹³©©©`dIåigÿþÍEMMmذa³fÍââë³  ÀÀÀª,l2’*«˜;w.ÈùéÓ§ZiºG¤|DƒÄÆÆ8Ðßßû9sì¥K—¸ ^ d_=(4Éùµ¹ ]]Ý… r÷%ºzõê1cƼ~ýzðàÁÜ/p±îîîgÏž}ôèTbò!mGRRR\\\tt4, stg#&I Ÿß‚ ¶df]væÐ¡C\Ö« ###!!!>>>vµlÚ´iĈÿý÷_ZZR>¢íÈÌÌ,//Çðv.\#{€ªªê™3g0|¸Kލ¥Y¬\¹l+ðòåËýû÷seÔž={&))ikkëé鉔h#rssi4ÚŒ3¸)^ÚÚÚDQ2zôh¦û¸ uuõº1¹Œ¾}ûúûûkjjs_ì’’’8%^HùœM@@À… ŠŠŠøùùAùúôéSXXÈQ Íÿ ‚·mÛ6(.¹;+}||  ¬¬œ““3mÚ´³gÏre4;îÖÎÎêjpÑrhOÓzËËËÛ¹s'Ü•; ㎔`=«W¯&—‰±¾(  oß¾åèx•——/[¶Œ\…Ò„ˆZ—.]***¸)¡¦ÂØ7‘›ûÊ•+Üw­2ÌM¼¯e[ahIì –Æx?"åC X¼¼|FFÓÆN:qz¼š”ËrPDD¤îFr¶)n^YYãYYY®‰]÷îÝüøÁ)±Cʇàl>|È4ds¯^½>ÌQ›={vHHãUUÕëׯs_&^¾|™±3¨¾¯¯/÷E³¨¨ˆi|ç 6pMì¾ÿÎ4ÛÔÒ¥K‘ò!­BÝÿûï¿I“&qAÔ‚ƒƒ™Š’ÌÌLaaaîËD¦f`Ù9t€Æáãã(--%VayñâÅÜA¦Îì+W®Dʇ@´Œ‘8bÊÙ¿cÀ€ÌOË阙™=z´¶TâåÚr Œù(¾²²’Ëb—žžNÖÕ JÊÎCË"åCp<§N"æxÃð×`bbb\µ°°0²“Fqq1çNÅ÷GHåóððàÖh‚0ïø©y ‰•••,TWW³s8‘ò!8žÙ³g“^ËEœ>}:¹Ìe·ÖERRòÇBBBàÿ¸8šäû°aÆq_ì¼½½ 
åcj˃”h-FÍ}‘",BîξÜÜ\nêßÖÄÛ¾={öpe좣£'L˜ÀæÏ'ò!¸%%¥ÔÔTî˜óš‰üü|°AðÙò\»¨¨(×Gª2ݺu›:u*WÆnüøñì_ Eʇ`#jjh..;þ⋳fÍúðჳ³óßý®““SëEê¯CERUUåîîÞ“´jYÓÍ›7¿yó¦åçamd««i®®;X›V |-&«"ÎòlÙ²%++‹å§ea¶"åC°«W;µñýýÙ3F,<ͳ…°‘ø{(_-QÞ"¢•”@ ˆ­|Ô8ÿ~ÚæŒ[Ji4~b©ú-…O†qWDv™þÀnä#ÊMQ…{&Òä-Šÿ·—–+Æ0ÀOì±õ:f¿†oL*2UÛSÒT†rúç\lïÞ¢LE måó‘#g7 S™¢­¢À1ûò6ÁÝË3+· ·¤¼•ÝEé¿y{½oìb°¶E~†+•Ê.¶±Zùj’HÙÓÑU¾ï),P(„€‘²§£¯s?ò>,Èð¤Ñ$~~»+ùŒ²óo“==8SÇâ*}i¤‰Î÷àûo°¥*Br¥4u~ì´é/Ù£ŸpÀ@t3"ˆ6¢â…óvÛß7ÑWs¨´Ö˜•UB‚Ò6â?D,(JJHôöÇ0]*õnsÏC—=ùÍÔèÝXùçèè%\ëùŠ’‰W~¤W+“5mú$2 ãǾ»¾ÑhDåA–2hì*×oË%þ¤V£ÙÓt£=°‡ÿÍ)ÈA4ênÓÓÄ!*4ÚøÏoÅ2ÝŽ¢í}IS'Çê¢|“èÛÛ`OÜ-{-újõ·°+á?*±É3—ˆàϾ²³_ñ‹Ëˆó×*caî«’ª22ôúvòýÄW™â2*Ô•‰½ppO)¹—ѧòyGõ}Eß’÷VXx `kÎv~Þœ.{‡ãÊfÈÔNœ{÷_õ…®«.¾˜;V“£ƒ2ò¿‹K*NÐV'(Î{õ¥²·Œoð¥L ŸÉŒiôoåÒM‰ôÐÁÙÙoeú ÔNQ]qÇ(Ÿ@÷Ú‘‡èC²Ô ôty“|ïçÎZ× ­u›ç©¤Ê,¡Úü«hô¬µ’‰•½øçŸàÿ'Äê'7 ÛT[³8\+€§ïZC·"hh½à#=ö f¯õÐo²±k¬* ÀììL5ìï\¶ž ¥%aK©Ô“ÄáJêòØ”3Ô‹›¥©7-áÎ*X ¼~0NŸÍX½ ¡Ö(‡kEëUZqTLÛk&·”=@jœKVŽ .Ö•]Ãù’Jƒ ­“ßžˆGGqTjZ²ÕzøÕÕéùÞ‡®Ô Å2"whéoƒ%é¾°@œvòË_¤³JIsTjBr0Ífàþ<`qk êÍbåãj¾Igû}|åí½¥³´é“ò*n¦¥î×—~Ñvø\Oâȶ¦ð‡50UGgZW†ç µ ÏNy?c:°VMe†þ믫*ºõD[Â'ðk9.€>Ù¬Í&ø¬Ù¾ìÆ­]S`ËC¿±Æ®z˜5-f¿Ñx›@ ÕïóyøˆXl-Mª†?lœ²'ú¦ýbµvûÿ¦H ßêO;)t§áàñÓf£ްÄΡËÏ‚lÿη$†Lɨ¦¯ÂêÖ±¡öZ%);Ïö»x×>G…ƒAöˆ“Ì– D…aœ¢|Àž{´=–ùô¶§ë>ÿëôMinwŠvë a sFUUõn8 áÔ%WM0,ø&”ä„òé×níA·Å¥Ö°/eô¬ö.ÄÄ6Ze7±e0,›¾v˜B9\÷´öà ö¤ã{ÏÖýrþâT{ç#S Û5sLµ{Ê QØ›dt"8œ“þû†N]£.Í’‚c’œ®|ÔÌ›C¦,:ôèìZ5ŒŸþ ôxHÖԾ̇ŒÜ!¯¿­ŒH_Æèu¡«£JBÂvˆ†ÅìTÇÖþöXD]zÁ5XJ &­X‡¿¼ö…ʗǰ¬ç–¶)m€{dNrlpòOšVD¯ëOO>e7ÊÔRÎß?‰ØµØåª·1ý÷Ú E’¡p“Òu ¹ªï–æûMËÒ?¾‚·˜™fw:ò_º§vxJs¤è²ø|•½3MƇ m¡®¨ìà2nm°Ì…qKò'š8~W†]»8cú\.‹¯ãv[%Ó÷í›2S%(©u6fQiüÍO ySNªtŸì íÞ©^¸–&òý5Í •ATj,\saJ¼Äê9r+†m4*ýK‰<Ö~[ŽÎp=™T¸LE„h*e÷Ðiìïæˆ>5qqcYÃç×$¦ƒÔE'•Ê÷å/ûš6h8¨!vÓsQõô§Á =o¼¯ž*Í‹åG%@~ð`U¾¤ß›¨ì‰x×ÐLÃÊKöÒ–ìm`g?¦¦.L«Gâ,Ž4`–?ÒŒFÃ\C§l=éiûá¯Ü«?KH‹ê‹÷j¢|[c>û”´wÊàÉT¨ÔÄÛ¢¼W÷Õ'OÔü¾hìÓñ¹Aª* ËG²þ2m·E ,õS®D_)²oÜ®ËTò° ’ؾØ®èCĪðh—]s§÷q¬Ý¯BÊÞ§ŠŸ¯ÜºÑ›Jïeè4¾#ÕËšþ@bø˜ *¿šîm8±q–šž+4jdHëîvŸÙþ÷>~0ÙàßÂ_ý¿Úuré +„_j÷Ö=!Û)Ñš% ýYÊÍc?[šñŠf=zÇ+KÔ‹‰‡0— ¶hˆìZ2Ð÷Î;â(Ÿ›ŸMû` 2cÂÃ| gXÛ[©qÓÐZúù®ž‡ o§¦91†9(©R»om躖?“˜XûÈ$•¦Ž+IzÈÆÉë=ˆÖž>³u1±¬)AÉùyþT*M¤NÊ´—ýÁ°Î|x„¤†O̶•7ñÄûõþ‘Å.±î+ð~o_“$†“³xËfQ³ø‰(\(Ø¢ž.1ð·‚¾6^_ t®Ý¤fN½r¸]L^]yþµ…&ÙxÛ=¢Ùý¾e™wÊ2ïFµŸÖ«íª½5ú-ÇšcëoÑÖ×Nr¡çh*Õ…q¡no0,ÿvB¤|ˆ¯QÏ ºùVïÙÊ/©I¯úÒ0êó[ µåÎÉ%ß;XxR騾ü– °š":4 ýqÈ^jM¤:[BV¶¿Ä\*õbûFdÏéx¢î0¹kÖÒ¾wrƒ~Ó–Ú6›2O¥3)Ì»M<ýyª|Ÿ©c%º{({áj±ÒûáÎ9jÙw]´.qÕºEG„ðÄ%H«Á2lƒ›ônmô§–OBDfo»s`Í„ä ë 7hW¶P¦]ö†™SoÆÊ?KÈŠ á@}FoæGïþÜe\jZ>^¤ò…Ûôv¼ƒÍP¡w,Ù“0¥> ,Ë¿7HyœÄrm²y=”ÁÉÐDsÝí¯6nÞ”Ú‘~&Ìv:s`;}‰ÿ—ƒÒPzæ=zmñô»Ý‡ã)+éÏaœn‰À|2-çú^jÿÉm7ŸJùbÂ¥\uëX=XÞB;*AñJ,Þ¨¿ÄQÙ ²‡ý|ãò°‹Xn Nd6ÊŒûW Ûê7Ý–6ȹ{Û¶YKQG}è®T‡þBÈcùr ÓÙƒåQóhnðµ[á½ð.Þbâ ÞJ¤›h]ͦ7 ,IÙ msªÈ}y²ƒå'ôF"übº¡vcŒ=ÿl^@ åCp8|’ºDQ˜û*Ñ×eÝég‰纅ãlÓ)Œ«…%µ/†I×65s 滩ݣ£>U\~è­-Áð€ëÁ½4LƒþèO]KëçC§Ú—=Ù¹6ÄŒ<Ò@KM,ÅÊßÃò8ÙŸw±]Nʪ0Œ‡-2îy:½Õ55ñ>ÈaÇa5%.Šä«/:™ôn[ÏmQ_è*!áo>ínµ˜ùt2äõ7I/8³Q‡ÞV2çá#ŒaxI)‚;‘’ÓØs:yÏ· zàN¿íeîô¿Zåëüsôò²bv‹‘ÁÎç+륢 9v;ýå?Ĥœ®¿úì|Äè­É«èåþûrL„gŠ’ýÞÚ¢]Ñ ¹|_ we¸h'}Øb""ØœºÏœ¥ÆÑ_ÿç½òs[åë¶äŒ[•ºŸñ ñ¡¦ŸèYëêªþGÌM`HùØ ?“‘Çßò$ §½D—{b/A9ý{÷åÍKz9ÈÏ<ÚkÌ΃ØZ5Üл†NÕW!Ž8òBï2un—+»Å.bëvle­lÝë·m£ñ˜ò¡»ö}{Écø*ˆ™èa w~=ª= PgÖƒÞ$äPÈ bxÃÜkôf#ôÁ1þÇV±ìç»RËÒ-øÍNYÞ†XBø¯ˆØh=ó_)oÜm×8ž—sôº?[aòx'0¿”O÷vTj¹eÔ쥘ǃ´jL/òÃ]?ö_¤Ýšc׋¦(*yò/[ÍàCrã¤gNå/WÝ¥»¤á̹"ÍïcRÛRIG„}¢öå3U¡œfê>À€ö6œ;JÕÊÏÏ’é±ë†mÙó)÷4Þ.¼ÿpýI#%£Îøãí—Òûóá ʧ,^ª3Ç>-x‹¢‰«„1Íc¦œÃØn®ØP¾ˆM V9»¤KîïKĤ—Ý`Ÿ¨eDºËëo’P3ð±™feKoȶs£ÅÏ—$Œ—zÎìoëj­Æ‚3¹¶ôtäs`³çzúHÁƒÖb4lÃplDðݾÁ’wûžÁúl¦—K )³ÅhX»ÇTfÖÑþ–Ǭ´ùL¨4“€|«¾b¡»MQÔSÁŠ­fð!9ãhýûG»yÒ3Ï$lÞûԸ贞Ò"lµÆ”B©ïú»ÊÏùöˆVoî{øø¥ìO¹ª‹ìý×o®$³ûdÛU÷ßWȉõÖpÉIžg»Í>äê; §‹[Ìç'È¢Ò¼ìg¿*¢8¾o>C»}cãäèÚWW®¶¼¹‘úfáªÕ oÞ\iãKÊž“ƒç0“õù!ÿ^‰a°hWÀ^‡ÚtøôÃcÛšógOŸmŸpÀ ÃoA»[4ø ÿƒ1yØáÐW3Ô‡0¥L»Dsñ.mš,ã–„Ü7GŽ]¹ñ²hêPzûÙa‘aÂ&ÖÉn^⸇€'=ìo߸ˆñ‹úœN6ÑS&’k˜¿Ä+§³ÏÖELŠøÊÀmû¯Fôá ºùQ[Q¢]"Û¬)ŠF‡~.ý!:@M]±öb žÉçe¿*ûÝ.ËÈÔ6ìŠ ÊÍû®¨e¤(ÕÆ£šý6-ŸÿÊö+‹±ŸÊWwZ% 
ìKFdÌc¬KIÓgÖ&Lí€Ì•ÙÙ¿Í.,%GL·Tœ÷**&–œÞ¨=•ï¢ùÏUÆ8Óâk'•ˆt36p ¾:Êîn²Ç¸Ú\Œ :~>¢œ‚©O\`aú+ÜW®\éܹsß“•Þ˜Zm)£ :~¤ijèÕ?ö.Êø=ýD‰”]‰¼Û«œ¤?ómÜG¯`a­#îô.\™±¡.g»õîë½±Eþ¨Žþ2³ùÈ4'ÏSú.Ñj›F¶ÿõ+tžÇ]ØãÙwàPg‡ ¤+‚–¡%üÕÝnbëoòs™O\ÑÇ?²î1UPQÝÂ&1·ü½+T·~§ï1³~ý?Ë=&Ì•;çÓuÏ)¯5ïŒÖ¼FR¦í™ºÌ–yŒùê äš]}9²Ìnϲz“Kzž¹r?´tÇÔ¥lUQûÃEX§ ü…%¦¤$“ššMèwÉ«#Ä >Zò ¿ŸþôÇ=‰!ôÂvÂDe;[°ûúŒO}Û˜ùkm·ßñ$Æ‘©wZ¥mêú£øþ£°œdÌ[|ìû´þZZò6— 6*§kiýÞ/ºX‚Bw–ŠºXÚ=«5XP*M»õýaƒÊ7ÏÿcmVþ”=@sèÊPÿ+v«pÙ+O£ð+‘{OŸ>m¹óM(°ÀÓâ¨ï(³Ú2+äŒÿÇÂ[ýD&“«·¿Ñô„è,ÀßÍc,Ý;ý÷ÚÜwÒÔqN¨]õqþi4«(”ÎŒ?ºjVIÃ/ü¤'®‚±…˜vÛ=;dˆ囼 }š9½†¦UÙïõîì\z‡¢kNScã/cÓl~ZõqŒÇC¤@öªrüAöŽ'шÑMgKPæ)û‹ÉßY¥|µ³ÿhïŒcÚqŠá9'){éùU ½¾S:ÓGްÔìcÁpÌ3ËÅ×'x™î¹ « {Êf½u³u–Ð߯OœæMK°ÆºŠƒì…&¿ %GTÝAö<ßæ»+þíN,qÐèé§_+{¯*iCø²)úC¹õײL'ϲ·ëB¬V§›:séƒ8jš8Ðîb_®ÕÊžäZî¡‹›§ÎÄàÞ'¥½’“?¢t@´&Íš¢HQP”(÷wyG.š3íòzÛºæÒ7dÏÀåá i²¿ŠvíyºÓK¿Ã—i¹Ú‚H¦®#ÉŸh ;ßÀ´J x1ÿ ˜g½kã–éÎ7¦×wÆÙµí•èZž~’¾éMägô3©hb ÷Ú Vhá2^G¾¡]UïO žIåøãÄ^´ò»”nt/ètÿ‹31[9§m§bØTÓ=µÉ—°>G-ñ 7.I Å0kÈXb×´½)3•‡Ì$G¯6:¶Á`$f@sÇß8^OsÐkqµrQiiUiýÁ)&£ƒa ‹o“^`Øtò<ʶ1ó´0LËz€‹÷;Ø@OÊ‹6눽•¹ô±òæºÝ˜·uârÄÅû¡D@´*Íš¢ˆ¯ßüœ4…ù‹•­õéýš¹_~¢TBa:&·=`ƒ¶å{9;_#û÷—åÿÕé¥ ¨}ÏWòx§¼Ñ6ŸØ*b°ø†¦UJ Ò‚}­¬vyO¦÷Tõ¸Y´Pñ·ÊçùÕý~Ê'À/< ¾ä¼Ñêç~p2oÚ_ùbne8댭wWiþbaŨŸS"tÕ­û-퉅Q[øOÓ­¶HªïœëFàÿ×–V»6×÷ª‚o‘J¯Èìß7þÞSËûßqÄÂóIÞQÄò›wµ&ßnRÆÜŸ¢;@´€fNQÄ'¬r^ê—}Œ¤jx ©x)ÃÈ}A2e©1NŒ_!Ÿš¶#=U·jbÛˆºX£Ó*™XúÀ,¬’ ØMZÈÐ[#ï®mø{;eâ?· t§ËGBBVG§!å«mëêMNKðÐocwã- BXeÓ SEåoRWü§kˆŸñ«õÌ L¶8]b½I}øpËU¦MŒj׺¾¡»@°ˆ&LQd%A þùzŒ_˜þªl°„ –_{‚Uµ“zÇ•“S wÑDÀxeÊ[ô’¦ÝñC>åKô“ï Â\ÿ´JÎÂâC.S×kп¯"†¥©üúþ÷„Q = ¶>\8´¹Mjú6 óÓ°8žè·«ßJ–)ÚÔó­Æ0|VÙK‘1ý_OŒÊ4,=0øÃd+_ך¹³éÃqÛWQÛ.iüdÅV pm£_å-q§pI®U¾&\jjÕOÛçþݬâoiþE>Ÿ¾÷íýëÍ™„)˜Â’µCM6ß«¥Riv·h·ñs[ýÚLöèÓ1ND Ý2ÆØu{pö&“z§UúóY5Öj–ÄîŸßM­5…••ØÃ€Õô8îT—ØùswŸÍÔg»“¯z2Z)º’Ø–ü) kýNs *ß‘êT^zIŠŒ–éí¦ÏÂ:z]&öÞþœÅG¨»†®=ŠÝ–ÓhüX¥[íŒ!Î:=[;ÜOÏ€µ×ýy‰$\Ç0ûÆ¿²z× Ëàåt Tr ¥îŽt3Fw.h‰ÉköEõÍ4DNÜÓÐ0.7Úix—º¿«nOý9ÜB½Ó*™8Ç™8cõ§‰NuC=¿"®jE¥Z1$Q[D­á÷|<Š•ï®u@o›“wÊ`â)rÏ®kïôðΔ´O×(}é0ôy÷M(jì[Z[á¯}_øá0…BŸŽË÷àzËu0ìÞ¿þk#ßä¼ Ãèʇ¥¹Q(nŒ×/º…¢ãÐX ¾þ†4íi\¨ë®£oóó0ñf–Œ}Õ1qú§÷;y¡g6qöº=Ž+Éû\÷aÝ0 |25`¾§ëDJ7¼u/‹Ï‰ß)ÿR»ìÚ·¯+†Éþì:²k×¾®]±1²‚uO•D£Ýn}ú18báà«§úða"EØ¥$ªÂ£ºç‘ÒZ´oß´ŠŸ]Ö!´Gݬ]Šë.¦xöTé}ß[ÙUå=Ñu€@ Hù~¡¬eÙØƒÁ%6ÎðWw»£Ío‡mø­ç¸¾Ù}'ç`óÛÁ6œÊl»·Ãê<r ¦óHh-µÑú-Tf›½Í6ÿ\1²T@—@ åC )@ HùˆÖ¸"y¹mçž¡¦¦†‡§¥ó¬;99±d!¦ðÙòȲþ²äañeY]SÃËNÑdÿûŽÒ )‚Káá¡ôù«9X¢££õôôþnò¬Ö–„–Ÿ_TTôóçÏìŸ}-é„ ¾ÿþøñc¶Š¥(‹ÏÙµk÷ŠŠ 6I–ß‚‚‚>>>K—.eÛk)‚˜7ÞÈéùóç ÜÖhiݺuOŸ>UVVæú|Œ‰‰‘””äúh‚$üïÿ+//ïÖ­÷Å®¬¬¬¤¤$,, )ѺÒ‡›m¨¬¬ä²¨Àçĉ¿|ùÒ²òÇ‘‘‘úúú\ÇS§èÝ£===ÿý÷_î‹>øŒeç@"åCpUUU¡¡¡ÄÄ\Cuu5ýFååþ[õÌ™3ÄÂܹsüøÁ­Ñܱc±pâÄ îS¾””âŠe×µHù\ËâÅ‹KKK¹&:‰‰‰ÄÂçÏŸ=<<ììì¸8ïÖ­«GŒÍ ÍB¾Tcù{>v€|&ß«W/¤|D+bnnN.—••íÚµËÑÑ‘;¢6}ú¯©=¹[ù¾ÿN,”””XXXøùùq_/\¸@.wéÒ…ËbçããC.ûÆÖSá åCp<çÏŸg\ݳg×(ñþ’€xˆÄ­0½ ½téW*ß‚ Èå·oß‚Ø÷èуkbgmmM.çåå !åC Z×+ŽaÙ²e'OžäôxÛH***fÍšuùòe®ÌÄ1cÆ0®|ýúµwïÞÜǺ½5öîÝK¾öãtLLLêVI×®]‹”h#‚ƒƒ¹@ù´µµ™¶<|ø[³,))©nôŸ?ÎMqTSScÚÈ5ÊwãÆ.]ºüïÿ#·DDD åC Z…ââbâ~ãååíÑ£‡®®®ŽŽDMHHHII©gÏž¹¹¹ZZZ½p¸5û÷ï/&&&/..ÎÌ̬OŸ>#FŒà²8–––†……ݹsçôéÓ°Ú¯_?¸z¹&vDsÜ   //¯””IIÉèèh¶ -R>g#((H¶‘ƒ¢3&&&44”;¢I,ìÙ³ÇÞÞž»óñÝ»wÄB÷îÝ÷ïßÏ•qäççŸ3iÒ¤¹sçreáB%³’AʇàÆŽ»sçN*•*!!ÁMñ5jTÇÉD999îŽ \¢[·nåʨ•””/ åC ÚŽ³gÏ())16‰ä˜Zp7ªªªÜÁ#GŽp«ò 0àëׯHùˆ6¥[·nS§NÙ³¶¶öööæšx þL4uå«W¯FDDìÛ·ÏßߟC£ÃÇÇ·uëV":\ÔZ¬¬¬§¼áôè•0î€F£AŒ8¨R>D‚‡‡GKKëùóçÄ2œ^½zÁK¡P81F‡î ʇáó•s‡òÁ%Çæ3Õ5 —òòrN—=¤|nÆÐÐpÑ¢EüüüÄ(ƒöïßÿðáCÆ B9==½Ž“wPÂrA,¸¦‘‘¡««›ŸŸÏÑAʇàfΞ= ‚ѵkWb‹ |>ÜÁÁt‘ƒâÎÅ]¼™°¶¶†: ‘YœHUUÕ!C¸CöŠŠŠøôéS®‘=¤|îçÎ;FFFŒÓC§§§×ÔÔˆ‹‹Ÿ9sfâĉ‘Þ½{ƒ‹ ï ·cÇU¾¨¨¨ëׯ¿}û–Ó³ //O^^þÞ½{ÜôÀ)¢£põêU(FGŒHnäáỺºzРAà0Ö¯_ÏþùøñcÇɵøøøïß¿“T8…ÁƒƒòMš4‰£?44ÔÌÌ,-- W^]Hù‚mÛ¶eddhkkÇÆÆþvðòfeeax+íÂÂB0ˆì‹»wïrßL 1lذ~ýúq؃wuu}ýú5ç¦9F›0a‚(Ÿ±±1_]Hùyyy=QQÑÌÌLÆ'ŸAAAðyóæÍ¹s纸¸°§KÁ5C²ý‘ÀÀÀÊÊÊÎ;³y8‰7aïß¿744äФ¶±±9þüµk×bbb:Â¥…”ѱٸzõª··wtttݽS¦L!†ÒÇøùù-^¼˜­Âüøq¨˜shÇŒæ2qâD6ïPVV&##ƉoÂàBZ·nÔùöãtœr)¢Ãa„3}úteeå;vÔ{ÌXØ…p÷îÝìxpâââyyy$³>}útâĉåË—³[À^¾|9f̘[·nq\^âçÏŸÇð¶y .¢1 
KJJ`aË–-GŽ9pà@SæRUU½té¹zýúuÐΨ¨(mmm°ƒ¦¦¦­Z=uuõ‡vÜÙk³¡ŸsrrÖ¯_uOOÏå8l˜ ééé Êàä***@’A›qÐŒ”ø\q` ¾;vÌš5ëàÁƒMœ!h¹ZXXèïï•ñ/^Œ? &PSrÑ/ PZZÚAòdoôèјRRR,?ù³gÏœœœÀ0­[·ÎÅÅ%,,Œ}"^]]}ùòåððð[·nÁu8eÊ0sp9yâ )ÁJlqÃî ÊsssÐBrÐ?"""B¶‘!¨©© ‚â;66–F£;V__±nÃ?ß½¼¼?~üà¬îÞ-äÉ“' 7nlù˜ã`šA6Î;'**jeeÆîÊ•+íÁÌÌ̈ˆˆÛ·oƒ›‘ÓÖÖ600K7Ý’Hùˆ¶£wïÞä#Íâââ 6œ>}ZGGÇÆÆ¦¹Ã~òððÔû&::úúõë÷ïßOMM•––ÖÐÐ7nÔî±…B¡€ìËìy1 ôþæÍ›Ã‡oúANNž<)++»jÕªÕ«WïÅiû(TTT€¹„OLLLKKëÑ£‡ŠŠ x8Ènyyù!C†pîPÝHùîDPPð Yžîß¿ ²É“'¯Y³fêÔ©wÚ 8u·§¤¤Ü¹s'>>ž,_NNnÔ¨QcÆŒÑÕÕ4hqLnn.øH°A`%;H.|Ä£ ŸõÎ_qãÆ H°é]ºt122233›ˆÓf!„ü‚\{ôèTJ²²²„……•”” B&prLOÅHùN‚©<}òäÉáÇAÀ·-X°À¢…ó Ä!º2‘}ïÞ=ð ÉÉÉ™™™ðC={ö?=”­"""Üò`ûŠŠŠ úúúúvvv`AfFŒahh¸dÉ’©8­÷ë••• l>„Äþüù›7oDEE‡JÔK@Þ”qÐ ‚”à~F}‡X­®®wxæÌ™ÔÔTƒU«V±ÐvÈà0µ<îçÏŸá¡h~üø1xÐEE(—UTTÛ„“ÒóÒ¥KW®\“ݽ{÷I“&Í™3çëׯqqqûöíûöí[Ó_ÁþH´$%%AJ¾zõª¬¬ LöðáÃAÛ455ÕÔÔÆã k)øýãå]‡Ãh A Á2DkÍš5,wB {6ltrrZ»vm½‡Þ¿,KJJJVV–¤¤¤’’”æ Š`YØ$õrssAç^¾|©¥¥ellljjº‡éHØ[XXeff–““ÓÄó§§§ß½{r´íõë×P9——‡zTÆŽ;gÙ²eè2Fʇ@ Zj ã«T*ÕÍÍíìÙ³PæÚÚÚΚ5‹%¿¨¦¦¦‘p#pê’¢¸¸$äÞ½{=m—£­­=eÊp­—2•••¸páBFFÆäÉ“/^ R÷/NÏdÏÕÕ‚MŒN@£Ñ "ÑÑÑ ñ°±´´¢Yêvm8º&‘ò!ˆ6EBB‡X½}û¶‹‹ ý I[¶liÉ™yxx@öÔÕÕ#""šõÎÌ¢ NÝ]™™™7nÜŸôàÁƒN:7‰‚#ÿîbII ¨Ô¹sçzôè±dÉkkk²Is©ªªºvíÚÍ›7ãããÁ‚ž ‘@;Çá + ”`SÛˤ¤¤€þåççƒ)ük/^ÇÞÞÞÊʪ_¿~-ñÎÆ´ä0,, 1//dB;þ|^ÞzŠ—«W¯îÝ»÷õë׫W¯Þºuëœæê%ˆexx8ˆøQ¢äŒ3ˆÞŒGúâlÚ´ ]W¤|g0räHrù€W´sçÎæž¤eÅŠðuYYÙV j݆”•••`¹.^¼Ò ¬  L˜³³óÌ™3ŒŒš~fÐÈ£GÿÏÞ™RÑÄ|^åzäV¨œ%>G9B¹ÊÑŠˆJåè‹H‰P¢t()¢"tS}IÉUŽB…BÑAº¨$ÏYûí¾}žçL‡{~ßû4»;;»³;;ÿùÏüç?¡W¯^EõW--­… ¢*æj2?=×ÎήªªJSS³Ãµ!PòA ~ÍZ2h•%–––¨0ëÌt¥CŽ?ŽêC‘‘‘½¶¤-===*´²³³Q…Ø€ì3•ß6l@uA GGÇí0O:…Þmff¦¾¾þ²eËtuuC/¤‚ŠÛ[·n©¨¨  ",E(ù  Õ‹*Nâãã>|ØM‡¢—/_îåV¬Xqûömôrm–ùæåå¥:DäÕÌQ5Õçл6l*ÍÌÌ\]]—“ù‹÷CGG—`eeuôèQX~ PòA @ TWWOOOÿçŸ~z *öPM±‡ÖàF“EåÖ¥K—Nœ8ÑuLA<<<#""ÔÔÔy‰(///UUUôÐ__àU.׬Y“‘‘¡¤¤KJ>d`3yòä¯_¿–——spp úßOGòz± >õ x]wÀúúúz{{kii¡Šàv2´GõÉPÕÓM›6ñóóŸ:uJPPð¯Ü¤¬¬¬žž^tt4,3(ù ÁWEEÅëׯ999Q)ØÅ`*odddrrrþü¢uuuBBB¨ëbíãÇ‹/þð᪡:‘éNÊT+ͦ¦¦•+W^»v-44ÔÀÀào{"""EEE°À@ äƒ@ ¨zôåË—}ûö¡‚ Õ–:‹†Ê’?¼‚ ¨BvæÌ™Î…ñâżyó&L˜pþüù?±«1buÊ?ª†„„\¸pAGGç·DµÉÏŸ?z¦(ù ¡>Uþ>}úÔጺuëÖ¡zîâä7ÐÖÖ644,--mˆD"¡GYYY£¢¢òóóÿn¾v“dToKIIé¦i-“'Ofgg¯¬¬„åJ>2Ø@•?MMÍ'Ïuß³%-wïÞµ³³ËÎÎnÈ×××ÇÇ癞ÎÚÅ‹Ù妊ŠÊ±cÇ:ô5Ó©©©ß¾}9r$,$PòA Á†¿¿ÿË—/W®\Ií0¤_RRÒõš·m˜4iÒõë×Û‹=±cÇž8q¢›cx ))©ªª*4 ¤¤4sæÌîOðGO-,,„%J>2?~ü¡C‡¦L™òàÁÚýD"qÆŒÝIäû÷ïïß¿o³•nnnqqq}›G<FFFbbb»víêÎ) .„eJ>2haddLOO~õêíþáÇwçô§OŸîܹ³Ø›0a–-[ú•Ú‰þ•““svv^´hQבwïÞ}ìØ±U«VÁâ%œ BîÙ³g&&&ø­w•Έ‰‰yûömxx8uÏÊ•+yxx^¼xÑ?sš••U[[ËÆÆöñãÇ®—¨utt„’J>2˜¡§§÷õõE%îù ‡Ç‚‚‚lmm;;åÖ­[UUUVVVøfQQ‘ŽŽNÿcbbBo•Öiiih;‹fmm K”|d# pêÔ)T%¢Nð÷÷ïLò=zô¨¬¬ÌÌÌ ßTPP8|øð² 1#ÓÅ´ðòåËñãÇÂ%ÌlÙ²…v•öÎ\½|ÿþÝÏÏïôéÓxxôèÑŸ?ˆùEŪ§=zTHH¨ýÑ7^¹r– (ù È {666ÁÁÁhØÂ¢3í7iyðàÁñãǨØÃ‰‹‹»páBrrr{/ØpÑ>(ù ÈP……XZZ¶?:mÚ4\ì…„„ ö퉩©iVVÖŽ;ÜÝÝi÷ù PòA ¡Â¾}û&Mšôøñcž+W®R•––ò2î:::rrrƒ#ËhFFŽéíí½yófêN{{{X äƒ@ Cj?çÿýG+ù¤¤¤ÊËËÃÃÃçÌ™###3˜²~üH ÛÛÛ/X°ÀÊÊ Õ„k~·mÛ6f̪Kš!kärLFm¦ÞŠuýê®äµæ¢…,[bÂÎÎ%ùË  _FF†„„¾™——‡j{œœœ_¾|ÜGŪçáÒ½¢¢b¾z//¯ù6ÎýööÔ˜ßÉ~¬*3‘›‹J>ò—111¡®òª®®~øðáön©%kÖ¬©««cddœ4iÒP{é»÷úög±‡#$1)ýéµIâl¬,PòA ¿‰²²²¬¬,vww 455 722âàà@>EEÅ!õÆïgÏZºz@ÜêØ ’ÇOs´·ƒ’üMPQ—““ƒΜ9óáÃZk—AšÙÚÚZyyù!õƳÓïj™ˆ”»a²êÉóBI1Q(ù È_ƒ‡‡çêÕ«ZZZÒÒÒC*ïRRR"""wîÜR¹–Õ˜Ój›P)Ë«—]z WÇËN½ŸëÖͤ‚¼m|7÷ô ÇÝŠ‘³…’üM._¾ìááÊ€Û·oµ¼›˜˜|ûömäÈ‘C!¿ MMœ£ùÛínH££‚ uÅ“«¥.®·ä›b±}‡uÝ»»•.ĉ*÷bÒÎyIÒ£r§SÑ\9ûŽœÙYÆ3y³‹>º;ë‹ØÅMk¦9ãÌò½øðËY²CTj|9¾¯ïwÛ:FzÁ_§6WyxÜÍ'TDmñöÍkÛÜ\UEy= (ù ¡ Waaáóçχ`ÞwïÞª¼|||JJJC!¿„a„ŸÆi¬,r™s-»ôž%±b‡µæ”YÙ¥¤Æâ‹[Ž?œ¥NçXxvŸ,Ý @".ö(gY¦fgÝC+$y\Zá_Ê$ÚÿC™±zV¬³Ò‘ôK[]Å`©_vi^Þ©5s¿šH³¶º!’üetuuãâ⨋ö 5wíÚ5D$_7±¹|ý;]“ >k§¢rkœ¤adüwwg*z§ý\›C[ñ€8|›41'æW:M|„YM Ia1´½Š±LzH^wCèrÖ^Ë&”|ÈÐE__ßÙÙyO]ïTá"9:‡bÖô&úœøSÒ¡‘zLjQX¶ÝópÅIjÄ~ ñ àÛçªîô×ÑÆ¯KOatÏ.uu{‡Ð‘5Ж«˜üìÎG,8¿bÕ‡ÇÖ)@É@zUUÕÜÜ\êZµC ‡¬¬¬¡ûúö`÷y¸*†’]zøÛÃ;õ X¸æ=h`£Êø§äCk¯¼])¦#ˋǜ]z™²|Ù¥Û°MuøYšê>Õ7D=¹ÕÿCx¦)Ë·CßzÉõ¼ £œi¯b Gq×y®ðÔù HoP]]=dónjj3”ß¾¢Ýùlš)s#<È›û$ôïl<ŠZ6Í&m˜z¾ÝŒRhi o÷ ÅÚ4WÉnu(ù ¤‡šÖ=(ù ! 
ÝPÎþ€^q%ù†>”³O"‘`øCšç9@É@úsu_ýºèÕý?«©>|Øõ¨NÖqc'ª …ì××¾Y˜†fÿku-@š¢ÈÙ;FTHH –ö8ó÷ü™`“•Ü—ýdC«]ÃòôwÖ]ߤÐW™‚’”}J»zí6aÚTf11F)i %j{ÌÕÕûÉÇxR]óðé^Zõ^™6ƒ)ûåYWþ»E ÊS™%Ä™$¥€¤Ô04û󽚳Ÿ_CÊË|X›–V-#Ã3{¦5a(hÃßRdÅfSžý6=&â éÃ^E»ƒtj“²SJ²cv›uÅáqÌÓZ{z;Ë #©Bqir٥᨞§³pIÜ¥³ç )Bq)q~ì|*zI¬£Þò8©?ÐÈ[M^€/¹{/êuSÓ_’rýìéÒnqÅ÷äÚ“–‰%ô—æ_?˜U«Â‡ôZ¾¡äƒ@5ríš_Cc“‘›•%w×qYX†OŸ1ýa•WiPØ©rs3Cþ±Û¥gtôþoßLMÙV®äê:&3q¸º: úCßʎœ:Ynb¢'(8¨½ZÓ#`¢ûµ+îyGçÆe–ÌÃû-ÉS0ÉÇ(d€õµlJzbžÝ—*Rœ ei'#«-{Bñ°¯QëB!Õˇ¼T #‹-ûöÐaÞA½Ûh"¾Ñ°¬äík3µïÞ|=rÓc]Œºô÷0Ú.x—3§þÔÅÞÙwÜ¡äƒ@ ÊåHßQ£ óæÿŽ_J~>:—M¼ß¿ß;øß’Åf\\B.û×£üˆ,ˆžš}æ_=wÝÆ¼?~<8t(f1/ÿÄA[JXÉJïO|›mAe¡ë1û%ºøfviåE_×Õ3ûdÕ¢›gÞý7UðáGåÖ?Ø•ã¨NÌ,ÉþŠíúþDV`ʹÛYékjc³Üýø8'ç! Ûôf¦¡äƒ@!eS®ßHY±‚ûÓ>œ°ÖnTUÕÍûË×9n(Ù¯¬xpñb¬µ Ϧ3lذ5kxjj}}/:9môÅF m.ä5:¿ÍþZBc)®–‹ËcÖ“ÅX–ìX¿ìÒp!¶ŠØì×dAHBÕD S’(ÙRØž¸ô~ihz™,/ù<ÝXûUïÄÅegœ2:²n#ék`Ð ENóVÉu·uyúQêPòA ß%"b¯šñÏÅ6¶áëG…†î45]5räè~žý¨¨}²“þ\ìQafîä4:,l·¾ÞNAUVe|œ±u˜8YW—’®„ì=\ÅHk«}ãÔù#gB?¼88YÛýꥩ±øïÙœðÝÎ>aÕf]Ù}XŸgö‡Ño‰.œ…†JIÑ'‚Ëêß<Üxn?ì26<ø,šrxÈAÆùëfrnÏ-™ýöuÀŽÍ¼bQ›¦Aü&l_·®G„“¥%wvö9nnÍqãdúmöƒyÛ®Õ)››sæ\-¯P/ªSýw¤°Þ'nh½±M~Ù…+d1?,6kñU„ bùV8QLaõ–,!'5FoáêYzöÔ°áZ²º,o¯HÞ4³ÆÒYAÑ¡yìݽ{?ßPòA ƒ//¯­[ùz.}YYâ‹—w kEE•†`ö%Äß¼}øäI¤¤,lš~.ù*DE´Ù9*¥*{üb¢„ñìrlÿX[; – È€ã`€wÖû8Æ3>y’^^>º¿Ù¼ïè…ì Œ£/*|ZZ2†_|•g>¢W)©§—÷“iïÝ’|Br‡&#Ù—þð̦È9î³v6ÇE¯²À+Ðбƒ³:Õàk=Ã"P²óœ¡ëÀ2p‰‹ó·[ËÝ;×’”dºv-bÞ<§þ“ý»w[ZöRöEDâãnðŽž0È&üíéaÔÁ|ö~.ùX:;ð(•@ÈßX6—‘Ö]ÈoRý5OTd0¬×®8o«Ïîm.›<úCöëk‹Ù؆オ–´uXöìÝá¼Ñc0•"K>âÞeš¼<ú&×/^½_JÂ=ºÖ==ŸVxÚô@ÍÆƒÿ*{†õÀvSÑ~*ù:Ô¥ÖÉü³±@J%Pc§ì¼êüˆ‰‘Ã1Z5ä¹n]ÞÛºÙfv–†ŠW¯Þ §o˜¢k@µ¾Ò–¡*|¤].®ù%uÚ V-3œðó;ùžI¹ÀÙ™Yë*ÇHPò<ÝÿÐñÒ/u“Õ7¯·¤uÊK*{æt(3ï=×XÑ•–딤:î!ÉN¸RZÏÐÐÐ```ëVH&$ôÊúõ½mréì2êáƒë Sôû<ûN»¸ðövö7ò&'ŸWW_ÔŸ Æ·ÚúÛw3 gYÙD¥ø„'t­¯G4£ ×€íþg©ûí´6$—’FPPðÔƒïØN:fŠð v]¢Øb·ßNçF&”\½]J@L,%q{8 Jðùìg€è² ©¨Èü—´Ø#!»T´OžÌïóÈÊñ'`&^ñÊÕt¸*sÈ®nùú€ù‡«ÿ’Û i&ŠÅjäå0«æ8É;Ðàõ´ª4;%fXxÈòîÜG#¥s²®ºP¦GQ¡ˆ!,lÛ«óE¦ÂXf=e ^hª ÿ=È#ÈÃ6©–ÜÞ,§½ X| å¤?“šz¤÷ÅÀF†•~èû%~²2O:;ê“K7`“Üú¨JpÿIAÖý‡•e¥"Òr¢ÒŠRSÐ_wÏ.™]ô"`«ý‰°«øŠê¸Dl"Ÿð­¹òll;Ü„Ùy¾ñŽîùç%Ș—Ç‘.ÌŠq ¢£ú¬Ãü÷%_1¥­§­‚©kÍbo9‚œ¬|t’CvøoMé_%"o{Ñ•ºÕâXÚt.UTzz:²~(ß,ö’‹*ÕxÛÆì&¤ü\ì9_~îc8\$¢lЉ· \ìM*¶T$•¦1ó£WÌô˪[O£/‚ò¸1š˜Ø“µ»²\Ö­þÌÛ·•pöÉ¥çÎe‰Ž>¦§·ª³Ÿý¨DNž»O.­­ÅdllÛ· ´¼*)õî˼G£ED¥§pŽšª+ô{IU?ö\rZ8jßY†—Ä¢JÀÆíœev¹³Óp¸â䓯™‡œ[­`ŒÛnèä0Ãýnöímö|Ä5…íF Ÿ¿nl¤í¯É ÿ˜[‹Í–èÿ’¯#» 5"h|Œo¼BN¢Ù'[肨&§ly9k’…›Lw"ú Ù*îoãPlé'²³SÕ7jL5ábv¢„}MyMMÖ™š¼uªºW*Þh㜻²dê:"ßÔöƒ”£ˆ%n,4Í+ëà\X±Bú3Y™§/n%öHÅ…q)!tô ÓçˆwÏ,º†@X Á¿z U}˜ýg—V­âîÃ`emêý‹~G;iYyÙ›¾7ŠJÉ JȈ+Í@˜¬YH(ߤ% ¯ûz‹9ç(±Qöëù“ØÏ?MÈ.ÍC7½³÷†‡¥Gœ9…ê Þû(~;=0ß.ìÓv]õ¹àím‘ú-vh‚øAï}§ÂŽÍgÏ)¥œÂ¢þQáFúêÅýѬ³-¡aÛ°æ^åÇ·øá6¢±øÚŒÀƒ;7QšìSV¢ªÃG·cþ„î‹íÜêèþOJ¨#s¨c³~IRnt7Á!Áΰb…ôsï¼–“o5Ä•{öš{ƒƒƒ~‘ýXO·ìh^ ¥äO—Ÿe®¨Øÿ7``Èòôéþ™Þ'Ù¿|å©«+_>ÿ™3™<¸>¥ç;ó‹ß§§¥½+,”˜4^f —È?"ÿüÝK(ÎÅ&¤‹ªÎCm©,¶§®_¥G3Õ2‡ÕöÈ®[PRæ«7'HކUéÍóÙ—4¾¿i¤`dsám¾¸ß±p)‰Ù0fŽxýCœÚ´ÄæÖÖ´öU¤Šôún_°þ7²¤K°»‰DT—Ù-R|}Ì·Ä¡f!ÔÓÊÊ+”ºG]„’[k%ÅØ&ki&+H¿i\²¤£~ÎIòh“C o88ê-·|4§,=yÔÔ3Øw1G¯ðZµ5‰lñq«ÏçOhààÀt>¡eq"uMI^¡VOÐM‡£î,;ðÝuõjRßH>1^ÈÑç/!>>³'$_MmCüÝôg2™ÙØDÈÆ)²3ôÐß (¶tcæ\ëë)}¿£óñÏÞ'üÐo&e‡j‰7Â==ElXíVkÛ·BÉ¡[@”«òRlû~d' Sħ›¬«2ÖF¨|pæ7n{h¦œBÿ±±³kÁÒó(úd#OæQ˜ð¶Þ|ÖêêJü¨ÚöûÉ›¹Ìâˆ`piÞô7RRŽ«©w4hd‹†·%€$ç<€*Tì¥W„(±#òãPe‡·Ç™¦P™À!f‡ ~ÍÒïðüB ¸z¬,K¸‚Š=òN¬;ÔÊ"X²]¡®ÎÓ'ÙÏÌ “—gìó·0kÖß±¯ÁŒSr ²>¨üüQXJNTzŠ öëó ÊòIáýœ-*ç)ó;ûîzΰb¥ëOS7'Î#ìOßÎoöv"ùfú1† € ‰“PýëTgÆ6AÓ{fœ"lSHÑ‚Á9³ì ¥²$f¥Îz›eæù…OŽ!’DÒo òaMZlN:Júö%§šCÙ¦Ê9¦£ aG¯ÇCYyDVŠf ’º4Óæc—…½ÿ­zÒ´RºxƒôSÉ÷QM½#kþÔ³Õ*]ñ™P™…†žˆ¿€¤;•㵜¥hÛQ=æ÷û=f"ˆ*ØZÕlWë ;´¸‡í÷Œ@qÿþ´¥Žî¿–V/©ÆÐ¶+LeØ·oŸFŽìmËä¤×òò}ï>[Nnø‡Ïxyg£’òÊäÔ{/óñ ŠˆÊLá%¤ü»Æ)=IQ›íÆ''Õ¶¿SUáTífýÏ;ÈoWêâÙ2­O¾ ±ÿê¿ Âó÷¡›´V0¸Ø«yz„ùÌòSŽó'K@QcJ1·Žù+óÜC\¤´}ò@³=Π›ÛlñÍO±­Ç#3¼UÚ$b~úí²0,ŽÝxy +YH?Dl"CÇT— )ä2oc}¡ÅðR\ft}aâ!ûEtl’ÚÚÈ’/žŽ—&ŽYÀdYF†*…`Î’`Ìr¤ëá™(ŽÉ×èö|ã:ø„ „a÷ïÇji™õrö¹¸úK›ôÁýä¹óº%ùšSîÿøþCXZ^P\ú¯§ô͉»>ªËMiÖüˆøŠêÉ¥¤¥Zn@æ¢ãü¬–r‰&6ÓÔ´USâS³KI¾JÄ3l³ÇåÆ(ÈÝ<ó¹ì?FÆó&E¤ƒùQòYîÓn ÔÕ²¶‘ 
ïd÷3¢ßF)Úœtšç‹4¸{n´Êú‚uöØaª=™¢ÊIØ4”)Y}¨&¯övµÒ :^(²]Ì9»öc—èª]Ç8ÞÏ먙.M™ ¯µ;7Kù¨å¶p4¼çt¤–w±§Óžˆ±¢Rìä.…”ÿŽú‡„½úPÃÂÌkb»ÁÖT?Ño‡`üSEðÍŠ¬Ë'îcí²FÀO«YH¿CRª“¾¾oãqä•9AØ}¿I0»¼,×µ´¤°vž_hË*úÙ{Ûx7hÓ"ùöëÚd¡‚ð†¾i´\ÁÍ*_73yX&»ÛÉ/˜ŸÿV«×}8KJ÷JWgSYè–Ú] (æ?+í üiÑ»ŒŒtŠqÊ$Åž0Né vŃìÒ[¸Ìûö`·Üê«^¶r_ èÖí¾wæ¦Ç™:+ Æ£Yu€È†øåÖÎàF–bþ\μ÷ž…èê§Š˜ˆigûeÕÎàCª®û[îº%ù l×w¸ßÕѱÕ6‡ç0ÏŽbÒqËG'e5oUtq­Ö1Û]¢Æ8¶‹#k`•i@1„ED´¸ZP›o…þÚ'äèÖ*vYCGYX»Bú+""ÜÎ×PÿT4›Ú ©Zƒ0f‚‚¸š³Q­WÞ#– Ax 5¾)bMýªÖcÃäšcê#ˆ•ˆ• õÄÓTØ*Û½Ž° C×Iõ€È€·ZI¤z"‘ÿ[\üÐsñã]IMÅÅåÌÌ#yš}N‘ÊÊ^}j””äÇ7=57Ym³ìòBL ­xbÆ)©i3YØ9D¤øÇxã:@h,~šó0Í·?Ô·räÌF‡Yb¶Zûf=ăì?áñ*ÄP<Ÿ]BºßeaA ’¯__±²uà¨SÍk;âÕ²Œã&+¶§÷Øž¦‰§…J5ýÖ’4´X¸´PØI~œ\½miÒÔPÍù³ÞNmf{ƒôG%P•ÇÌ~Í33¶hœ¹ÍÔ°à4¿,½ç'š¦Y;Ì ñG¾"‚¬%Ø^ksž°ÜtRG쯠;2"ÃØN‰ é<ûL9O³<¬*o6N‘VD½ÝjlüÞXרTÿ£±þ{CSCC]SC=úOcJ}cc½”êìî§6€»ojÇìAÃ#ŧg³ õλê0ñ‹ùƒ =xÌ|"›K^kLu¼ºBÈûMm]OEÖ ÈŦ½Fº½™¼e†(Sø|úRò5ÀÚ ù]jHŸYÙúËÍ™z»&!Õ~býÙð?ª˜D(azá“Ki`’Ø@Ì'$˜UsÓÓªª¿'G½jÁæ0 56•üw¦üƒ h´9Œœw%y¡¢âft´m{W#¥eI÷†¿ÌÍçž *­Í0Šoªž0m&žë~4646Öýhhjj¬Ã„Pc=*‡ÈÒ 44’%S}óþFô׈ ?FÐѧ£G¡Cÿ2ÐÀþ0ÐÑ34ƒ†™Ðÿ‰ŒXˆ‰‘ÈÄ0b*Tº²´¼÷âqdw ͵ÒÊ€Ûj´¬½}å£rÚ£°€›Å®(°6~™¥RÏÊÈ€ýMï<‚¬-¯8ÈÞ;OxoÜ’Ù¥—Ñ͵_·‰Ž¸„M{gWàþ5±Gdé©"Þ'’óä ?là÷§oˆ@èõ샟^“sø|‹K¬ôHÙ÷øßãõÄ˰h…9[„«@X)Zì›LÎ ¥kW΃ª:°n«oÇN5±³WÓ3 ›¬8Ž¡þ}~ ±q2.˜˜˜P9ÄLd»ûÙI7Uô»ïb›ž²Zº26‰“q´¬½;eø‡ŽS–ÜIþËGÑ#UŒWà›[ÖXŸi½…摜ý«Ó_Ô¦÷”íìí„@$Dƿ쵫¤ø3¿–fcå×ÊÆ<<¿`•PWÿ½×³ÿ“ù ¸œT9g6&Ö#DĺÅÿ¼:Úv`Ä{wå 6ËBõ™ÞYA®²-µbvP YYl=Xc…ÆiÚ<ž¤$?–³@%üþ Ö+- äƒ@ -°sŠ|ÿŽ Þ‰êÓôŽ@·½ ¦Z%‡Ù,»<Aþí,Á1Âîɤå¢;ôRgEÔf&uÿf¾|©ëåìÓ1«¾~gcítm؆T2=}RüÙYÓ—s¥–édôµ˜&GWû’@ÜŸµS‚¥* civÅeÂÚr¶‹ßû¶ôÙsó *ïD犌—*!~b×Ù¯eFOGsç®Ýº+úÿ­ÖW”Κ=«çÒïqÉGúœç¸Þ3$Œì·EDÎÁlÕ¯5°Ú‚@þœWÅõãE;1-1V¿Òèõ·–Ë.ƒ %¶ü(~QRˆ’(žâI¤ï5Ÿ>ƒQ£jè˜â7Q-'7f&nÙ§%ÜñÕ†z?ûÅÅõ“d:•HìJszTùxÄîrhÊîx´ »s yØTÓíÜg˜ÆÊsذõ2‰ÈŸ¹GKŽ(mŠïdyITm.r[7Æî”[¼Elþ'u~¡®Vý­¬0F n®›¼¼¼æÛôk§Äe…ylDzA ‘*ù\ä{h|f‚¢,ÿmèÏ6«‘%—ØÊ»›9Tw8Ç}öÑæúijøÌt8FàäåÕu*ùHHšË¡á Öc½y¤ìô¯ ß7a½ˆúñ·n¼ÂÌ8+3¢8”¯£‡v¦¸)ÛÇWp¶KE¼sfKNIÀA_Üa;×õ2bbü½Ÿý§Oêº|(–^følO/L{°umÖ!x]×cý~<’ÿœŽh5µNË\[ËœF³äŒŽ¶úéLœÈ3€ŠÍÖ­[÷*ë™01쇷÷0&ræL-þžuDÞ“’ï{.Eì .¬yq‘HÂ=´Í·£;ä˜õûÞ–ªbËà±33w#µ4XÓA ­$_.É`>{§Jº>Q U-6d¡ÜÝ}ž)[Õ%…I¨P'ØìϨÇmp©æ†ùJ"&œÕç°aJJÖ¶!$Ìvy‘é3a£Î9ÝÀ­]7J§(hô‰Î×O^„¼‚òÀ*9Žöv?—;ê?Mw!ûhþþpK¨N›~ü°´è g@=(ù*ónãgÅñ†™–Ù6¿«×>HšÎÔš @@q3ï6ɢ̑ڸw©Aô\O!‚×ëÍOxÃí"ßéhcT”@OãQN—¦M¯t­4åè"’®0€@ŠS~Ò.¾`Å¢=éR¬~€†õ|j¯@ê+ªKOáçÕ+ÄÕ ¤ Áï­ c|O°ÿkPcÛUå˜•É '/ÚûÙWUú½å\þ.ùùÌr®ðŒæærsÝ„SîæåäT})ÿñã{ŸÜ =ã¡ÓÕ—öÚE{Pò±O¤;M$Ò Þ+MÄ>LjæÕì;òäB{ººº7n _+`&P‰ÈÀÀkšx=92e:³³Ãž=þ¨t#(ª$Uì©ëª'ßÀ,•õDˆï„@ ƒMÍU_uaåaânJpMÙÿêÓ0F‚EDbeaH‹²ÊŒÎSç—Gÿ\¼Š˜®­È`úzéèÏûòïê.™8uÑößúgñ•ÐOÇdµ)‡_˜¹§x'¥šÕ,Üeúé;Ä•ç_:ýüH×·1c†L_=óç¿8:öñr r²"°(BÉ׊Lɾ"§Û²ÖeÖÍÓ£èOËÚ]Ë:Øâⵎ@žÃ8•j•%Ïî¤F‡†6ÔWÅvØ¢‹ 9v¹¬«¯ ÍvgÁÉå^êñ†/Èà±ïtfÃKvèr2™(6¦koHW_Ùžû¨Å†àý0H…—ñÒÈ8.޼O‡°å·DdvRæxî\ Ά0ó³¥|-Jn‹€»§ù„®ì6““èÔ5 û*ûÓ¦Jà3ÖûŠÌ‡LššKa9„’¯-²s¬„ì·±"tÇZ+/l±ÙìÀy•‘öCó„n»‚(n^ð‰ÜÏÙBi ªðqå]Ü e²ß³mÃ2ô Q(d0¢©iq㦯îœN}VÑ I:ºÒ,°ÀÎùâj†Gеy‹búÈ?Ñ‘lÌH"‘˜™=uýÜ»¾·ï¾öaö•”##}ŒŒúleöììò °BÉGK#)#ýöåX’Ïö…äOÃÒ3|‰á$æÉØT’Jôl=c÷TJK¢„ûp–À×icJˆ…èPžI.ôE_T}<ì¿ÏmÏQJ§Äæ»YÞ*ð•C u=Ò‡ÏÌìcdoá(ÐEœûÄ¥K­û6û¬#yhê“Kçå²­Z¹ –@(ùZ¡GÏ|ƒзoTã¡\èÄö@ÊwE«¨½¯AµÜW$J;Ž,mšÅ^‡ËŸÌ^ ã–‚ û=@ ¿Ü2m%™é6ckî÷ò©D‹®>!èOƒ@HF[g¡ J>È ÃÀÐöØñ«VþegfíŠPþ¥ïgpëÌ\ämk;ª÷/ýìy¹”ôpX¡äkEtU,m&PÕ~MvPþèÁÌ}`¿™¾Ù¦ÏSYmùÉóÇ@]uóQ cŽÄÆâ««O$Ì­­ÃBBÐð0€ØÚ‚ò*/Ÿ£xLkkùw™!øZRö‡-àû† JfêèW}½Û…‘gOàï_æààÞ²o²pqYY O¯æ>RñïjXö äkß ¡ƒTæØÛÚ}©-ÝN)1ÆÀ. de‰‡a†s*—/M  (õôõ|?Ïhà…ÏÿË9óÈmÒwÂX4€‹=€­?Bîùäš›uÚIn™/3$$?dæýŸ¿¡|ßAɸq2§NÅ-_Þ{.9ÜgYó¯y?É>Ï(шˆÆÆ½wÅ'yìËLaÁƒ’¯ؤ»¶+±½„ÐŒ¶·´[† §;? 
À˜Î—5ß‹˜ï…o2tX¾|ƒÏ6Þ^¸Öç2.–‘ÜtôýhޱñÚ={·9oììWUr6Ô33Ù`©ƒ’ô1..;wz¹¹õ¬ÃÃêolÏ~¨ªjô·ì;oôعc››{Ï ¿úº‘÷ttŒayƒ’ô Üܶö¨æ÷éÏ‹¨Ø3ê§Ùw÷ðÙ½ÍeSOe¿²‚'3«FGg,iPòA þ¥ùìø÷_n:º¿¼rP|ü!a>•~íÙe“Ç¡C»V­â``öwSNMeààdÓÒ2„e J>Òï°·wø0º†ôBCøWD¾¾%Në] Àÿš5®Ož$””äèè0ÿ¥$Gøî{ïè°iøè J>Ò_QPÐCÿØïmºˆã}:GE}Ã?fãÆ•(û’’Zè/0p×¼ù¬ãþ(û7o88Ø6¬„… J>2X縹©±ÁÿÀ^u vYÙ_Âpöì—qãÆÌk2@³ogçú½©)0h’"ë”)ô¿ªæ^¼XÍÃÃ5gtË %X_8½Ã:l¾yڽˉ·óÔÕx¦© >¼Ó!À7oXRR>ÖT7/\ºTx gøˆv¶nhàþýkqqTUx¦N#ÐÓw:Xòž%%µ¬¼œdlddj*Ë”|d3uÚô‡P-0'/éUñ«²OUߪë¿ÿ@èFpp0ñ%>q’€€äÒÁ¨ä(*ÎChÕóž$½*,úø±²ºº¾éBO7Œƒˆf_LLfÜ8iS8CJ>2ø´@™I:2“†höQ-PJR ýÁ’%@ PòA %@ PòA %@ PòA %@ ½.ùº^9@ @ [ëƒ@ @ Ö@ @ ¨õA @ j}¡†——|¿ÁÖ­[áî5œœ<˜™ ð9÷rÑíÏ|ÿŽx{oƒoóWÙ¼ÙcøpÂ@¼ó¯ßý~ð·àîî1baejèTã>X(¡Öém¬­·Â‡Ð}† GŽÞŸp¯âŸsï?LXwÁ·ß¯@~À7ÞêUV_CᣯïÇ0|8|°}Ý$èP?í?‚Z@  f Ö@ @ Pëƒ@ @ Ôú @ µ>ô"µï3’îV|Eƒ,c§ÎQ“¤ƒÏ@ ¨õA ý‚êØÙb³rºQÊ&1vë øÀ ‘‚Ø-+,¼_wAÚ0èÔ¡5¼¿ëM/ÆE`eØ[ê¦À¬cÚ¨ÖG¨ÈJˆ}×À=MK‹ª€\²Òµ¾¦Wû7ì* ™° R-‰ÓÄÓ[‹j¤8»Ô9ÝR¨›ñw´Ø‡@¾éè¹)!ùã™é³¹4v–ÈÀcX‘¯h(®ŒÍÙÖom­ø)&À|¥olî[¹+¶;bH+d˜~ù„š7/p•O÷ú›h¹æ:•”¨oæV&W ¡Ö@ Á%û ý[ë{pôÍ^³Zü\ðõà0¶Þz* q ÝiæEn´ZëÛ¾÷Hóß°[̸[ZÃïvò D=¯x±rÓ|·íù2wu,V㧸ÍÿÎ Kk›Îª½Û—*vçŠfÞ);VªÂ5¤Skêx€Ø®[ÇÖ±µ­›GÍ^ë¥IZú«aB„y¹˜~­œ*|§pú½£¹¡/@^®w”&ø”ؼ3ÉP”¼“Ë3'w+7|#é—²¯üž“´ñ>4àzå™äݵf¾±­O“ßc!O#Ǿt[î–Úîò¢«öžõXªHmžå…N›éµöÖ‡§¹e³!ìq«èc†EžÕ$Qp< 2H = ¯í@ÑÍü3<R>é¢;ÞªK¶$6—9lNUê`æV €gž+7é~»6èëùD¹¡æœ5ˆ(r¬é[o8KsASEbðZ³gŽmT:¶QãBþ5´¢z’Ê üŠF Þ«5±Ê‹P“un“þúÀðÍjá›o*i‰즂tXûŽËO–|Ï]%ø]•—ît\¶X^\ˆHÓU@;Usìo–s§ûˆá¯Î8¿ûh·r¬¼»™CuuSxÃí"ßém:1RŽ­·° ,êø’z§2Ï,“kÛµJã–©Ì {ÕAüèâ@z–!^C$ôÝõh4ÒÐѱú:‚ÚR'¥1ow¯ |á)Æ¢-[dúæ’G³'ç´hͼ—¥[ÇóeÕ[ú‡Þ^wÀU>ÿ¥ï˜×ü¡phÚ†çN]rû}ÃhvNöªJÀÞà»Wù4.ÞQ£"„YnÑÁý¹Fb³ÐZÄi¥¿ÞMl°–‡´µö2²ú]ÒÞí룥Ÿq3=ãÖ&ЦÑV[[5²ÂVô‹åœ™•R{°°é¨æDfÊÞtÝE‡@ ¤od ‹.¥øSÅ×?ËÏ\{!=k™'Æç®•WÅdÜû’’Õ$R#‘¹EÀ!³×GíLapË@7Š«j 7Κ@%‚ú¡üóÿRlãTœÒžˆÏ–œ‹JÕè-k2Œï( üÆ\¿×ú¾gPˆl½ON]…ðí]R6U³:ê¤yÔ 8G•úèóâ{²ÈÙþ×ê49uq–êï²²šÏK Ö |›Þ—ìâ'ï`€FëcloLIÏ@»5–±U{©ƒ‹N6vž9&=Ò?¹ߎ^.Ͼè¥×\W¢èaËè—…µ»¨((DωÖІUDâ1·ÄÎ2ÌX£Äù¦Ÿ¾$f9]´]©­©§ rÒÑP ©íì@ õI­LÛÊŠßॠ4ÚDå’Ó5–kÞ¨»ó(¢”šâfrò<&§j“Tû @7VÃ-8Û-˜ÚWUñ¶ðÙ“¬»·oœ KÈNŒôBèn£ÏšLµ¿]Î!˜²¾Ñ¼mz*éF0´O?/Ò½Ãy] $+ݶ±Æ©Ò,U™ÇcïïZ_eÖ Z•ÏãN­—í47R±„_‘¬°àØñ|ì%OI€—¬LUÜ»N«}9V ~´PH¤'Œ3YpìX>>ÖüWµüÂj†÷<|q+•oêîŠ{.”‹ú À† B9­<ÞW5‹/­n­òÉ_y÷Ð` M2ÛE ñÊAãJþϺXª µ”×õ¦p·y™„eïñPMÃÏ.Öjð@Σ÷@¾ó«66P®ÞzsQÚJCþN•Ì1n¢2ú›½xƒV“EΖĆï"×¹¯˜{Qîû_+ç Ù÷K-t»ŠãÉ=¤Ò+ÃÏx/¥éÚo8iÎà–Ðɉ ô4 Á ’ªý½­ÊÌ2ŠvsÛt¦mxHXÃ|îtíéÚ:mùÛ{´d`heÅ·Ÿƒ°©Ï1GOÒž>;³Ážýo6UD†œoµƒôt—˺úzê ‡´GKw';y©;wð8ín³#a4* Ñ6ìÎňéɰFèš‘ ÆZ û’/Ž|zÔ¬•ùõÛ(ßP|„DÝTþgÃnrúËÁŽ{˜j¾eOÁrŠÙ7Nê®±&±fµ”sFìºé‹õAÚu슻O?:³rr«T¾çÜ:Rmž–†ú8¶¾3 #|Š9öº¡Ó¡H–±¢%%$$¹‰° õ.à \x%ðe‡„Ë.šÆ*/̧,^Â#ʽ£¿VÎñÎ’ R`ƒ==ö¡ÑÓsqŠŠ‹ˆŠNdûóOŒPqP‡sWô¾Ó£ÔÄœÜ×aÉ0’‡w”Ø?Ò"|½âD¾î>§éSVJÌÝ{™9o^|)#w33srˆÉÈ)¨¨Ï’áO¨÷dß/­ßP÷ò)Å(fWk•<»°‚ªòµ×â2ö*°S¤mõ}½¼+š8CbPmõwyO'aƒ¦¹Ì–ÙÓFïy•€þ¼ZöHoz–¹KŒÒÀ&ZF S/9I™ìk“`òÍ0ô·fóåç>†ºy?ôc'…¬ ¸ºõžÇ§÷<î*©Ò’ øy„Æ”–Õ´D$FµÇ8ÅÜ$Gª¢KæÅáoß“WD;Kò;‹¯±U—Yú,él`8eˆŸgSÆ…‰?«;è­s#ÞHïà”¦À)0V×ÎDµîÝÍЋͫg¨¿ºsãi|ôãÃÑžéàöfYþÍ@ÙÈeÞ4IðåAÔŽƒߟgîÏ|8®kŠº§ánN‰Ý‹+`žrhiOk¨©;eLslÓ‡ø‹?Ä}Jߎ5‡J”t-~lŸ ”î”ÉcÑeiiA^B2iÊò[i»ÈËýrÞXhg®”W66ÿçËÛ1›Oï‡Ú_ÏhF ~«5û+cÀgir1üf Õ±SÅf½òòB³óN¾Ïáž{»ñ*u¯äG÷ô$Ÿ?}Ýßåí'¥%-­GeÍeRòSXëÊÞä$_ò¦š_„¿<´”Ø;ÒmÐ|¼¿)û~ÆIÚZ 1ó#Ãan«7– ¼¸‘Fë<#Étùj_³ÙÊ:4j!¹Õ§´ÌÕD›¯áÅÕ#Û)·"üì éyržIÚ' ñ¡ÔÈÏó23Òî§&¦ÆGÞlå1%w÷Dú4Úµì$ú"ˆ/®,y–ù ãþ½ÔÔøøY­¼¦ìY çžšå­‚¦^÷­uÉiçÙ.7ñr'…L\¨µÏ•}›ÖËÿYÞ*ÊŠß¶R‹*€zë%+ê„ M•q¼ÙN_µ íN‡ŒuŠEœ5)ׯÇ]ôüê¹,€T)#W³•ó”ÄÈqZ´D…%]g7t˜8×4ï’oÒÇÌ×.$'å=¼›BdaÑ3s™5×|¾š$ÍÌàQÖ—k><¹~úLÚ£¸k•…DPÃiì°Smîl=-Þ~4€Ö¹•,¡"ÄŒÓ3¼¹j¦'(ë¢ÚêhSEQNn^ÑëêÆz€0ðŠJÉHËv60H*/È}œÿöK5¨#Ç IˆKO¤^³êëò/ûsÑ0Û×’¢¯ p¦ -•v/=³¼ž‰ž¡g#"6IZœ¿ÍGXõáY9 ­º8Ex>äŧæ7 Ò3IÓ,<‡ÆÉÉôñ÷Ðt&Ê*Hëý²Èص—‘µh½óõUFrtròý§/_¿{ñö V“0OYêb!«ÎË-ˆ¼Á›^sQ‰¢Ö|ô·¬9f¦A:|˜šPæ­ÂFišê€GÔÔ€ÛT#ä°Xófñ5;e×ôNn•cö·”–s7(ØéÓœ‹’°ËÊ/˜¯×Hi­²c8æ-šï–Ò2ðè 
KSÚEØI¡fCÕ¼;ÛbïŸÅ,®jŠþ~^ÎØN®ËÄÑòÆVèïç×ä•\àä³`À>gŽEÿnðLÀ„\^ÊÃ*U¼“4ÓpGKyžª5§¦ðfNqsß§KÂe‡fÃŒaS$B©Ý‚r2l•99-}2fÇŠ÷è²^X#âÙüõäž3S=G®dÉ~–ß_ž;Å(‡šÀTuQð-1-›šÂŽk•+ØpõÂqÏv_¡íå9ÒÜÜ :ÉLlzK?½´†æè¯‰ñÔtt/äD«õ¿þ::VaU};Uý^-缊Ë|—Á^oõaI¯ +^õtïxmgtËIkÎ8|õ—_úÐ:n¡v«Ì“òB•t©âÏIŸÜ6’ÛŸ[,ëOïaÒá46.J·,+;6G¨8iÆI3YHTFš-'7‹ºÝÆ#EóÒ)Í‘… ©OÜxyRº3ÝkXÑNQ²r-6›qg+²Ž¯Ð'¿>ÇËe•[UpYÇÍÚ*¸è¨éz¶UšB¢B °¸›70´8™äå®%ؾúÅ” š·ß…tûút¯¸6õÓ‚2rl_³h^÷êÔÒÃ"èBº­öõ:â´µ³w@óK²n´†Æ/)½xCÈâŽZ‘"*Vn*-m»¯_š{çšjгŒ­öt§áµ¾ž`TCú^;Š«D°q2a#S3VàÄz¬ÊßÞm3ÜÇ·P}"ÖÊÃUuÀÊŽZã*372ÿ‡‹ =Jz—~#³Õeì[à].JKVˆ¸ÆÑ¤yd"áHËÖø®îUv]lôi=êÝRΕ7·–'=‹‹L¢ 0âëO¨m¿Ÿ<{ø’kBvýÏí5™$mônEŒ&¿ŠŒÌ‚µ.äïÀLס*BEjÈZ“­”EêÍÈR¤!Âa¼ý%L“²ˆ‰Ý9‹öŒŒ Ã÷Ò}´fÕ%Ü"[ä“òbq•oÆâ3&´²!jë¼…£ÁÝËEŽØ¸hý3áh UÉ!å uPñÓ9õ)KȽ˜a9w´h-uif"ÓÐøîóØR—´ñZ©¾?ÿ|‹Øk|"8Õ†”?žùp6_«–±‘Øô4pÃTfú…œ;jpŠ ¤/ J®Ù7×™¼0LRz~•š2Ó/}híù…2?ycIÉÆ¬ ™d€v«áïað’´˺”Ñ߀u_K‹§&æP並ÃÍ.-ÏêÙy;\å°ˆI§}†ÍS˜"×Y™Ì¢èù‰[eÌŽbzÛŒ-gþU¤Æ%Q:NÍRKOŠÐ¾h¼:u™b†wŽ­¾õò°ô¯Nv"Ã?I¼¥7-¡q…DK¥úìÂÒŽg±0™ŒjC÷Ce™yþÞIõi¨T 2 ¢ö¨h.ÓQSWRP“‘k5[áèTºÕen¦¨|¢Çs^Φ=o/[*ÙCŠ«w/mkbÓNºy,ñèèã…@¨Ö×Ðð¡õŽºæN6­Ý¹ˆk¦Ë£=7(ÖµY)Y)íR˜´ìJx€Tó§À57A²Ã6/ómVá2#Ã2Û_ÙÌû¿ƒh‹’º-°¸Qßo•WUaê+_RßH³6tª[¸iy«—BüÓÌ›‹ Ô ›ä\±4³=†·2.dF«ñý…Ü)9gЪ|X.o·ô;‹uáe]Í©Z7”¿T:¡åi%ËÑ@Õ»Ìä„ÿâ®GÄgç&žF´Ñ4×]=á<¿«Y—Œò%Håﲞ>¼xöã»òÏ¿‘‡«zó¼óÓÚJ7HÂ*¹>'keBÇÊ=ø»„û‡ÖǪƒ*ie“÷‰.öùõTeÍ÷šïýõóÆ8F¼vìô¨DW·J>Ý68Õ6ø×.)4g3‚lîôp2â¿KÈ¡q¥ðŽM£íëƒíâó=ÈAù€”tl½oj¹Á2^¤‘û=ÂnHiWÞE „ÂâbJßݘEi%‹Ësÿ;wô\ôÁ´ÿôšèò¾·–Hw*ÂèšW Ÿ¡*Ýþè8éÉ`Z‘µ­Kå6ÛD.´Fv£óñ€×ƨv)‰ ‚ׯ ‰¸f†@z…aE1§)†”üìÌ¿ú¡µç/”ù?¾‡A\g^ˆ¹Ó¡Û Ü>3è_ɨ[—Ra«E?‹¶œauŒZãé™Ë óðŽdeɈ¶­ïJ5Ì%É×M+Ê$W³$êÆ¨û/ÔæOøóì•’g÷µî•`iîNÅ «I¼ù9V~îrô·-€f'é}œ—Ñ̰7 ñ€à]ùw6u*Þêr=4d¨Ó¤•Œ'K‹òŒbÉÂJÿv¾³C+øø{WËgææe"y…^Û ‰ÆIgþˬ “g¯P öòé›h´>Ö)[Jž+‰ÍJ™öjtWhmˆÌ ”ÆŸüñÈÖfc]T›\ÒÆvèF4F;LÑ™N³ØËÉ3¨‡4cÕÌÜ”eTdúZùYm’ÍŠ¹ÚÍü17·XÚªÁ é{¾gÚó+àÎÿ¤WǬaÃ~çCûËežø§÷0ˆé¬å-7Ûgå½IyRŒ¹«“Ü(*_»µ[‡äÚFk94‚‹”G=,ÝÚú7d$WÑq²9Ä…h>ïåWòO*±×œ³fÙÂÿ ÷\—úh¿H‹Š^ÝiAñÖÛ᫯o§¢<ͤè'Ÿ†îŠŸM§fÞ ‹â½RxQ©£ñmâŸ{wßUÁæÓ>/xÓh_:­tË8´š2ýÁ;»ÍòNoyù];¿²_mÇdE?(mt"F+ à\ ¨õA9µ¥ÉnžØ•£Œéo´zÿðtL“y—Ÿý–c 5ñßùÍ23²ôãAÃÑ»ÒÁííJü=sn“}ÿwr·ƒìÙüΨt ùæÚ•På1Ýæ(6·dœMjÆV´YcÏ+·e]/—¬UniŹø¤À¬™ä'ð“U>d$ÞâI¿šPå@q3q‘ßÙÀ· ïäìŒW/x´̤Ílöy°n÷Å®'±Œ›ë±^*È/KG¹êØõ •T‘ðõù)“éä 1¢ò_ö¡ÛkX€;ÕÕ4Vÿ5_?>»wóäέ©¾È<9hB¶ ü¥ío–ù¤øÇUJêlå†È«D››¤Æò¹×‚W{ž¦,Êdä²°u»óÒ½üZiiŠ¢P»EÕ›úÌ“ŸÖ*MeÃ¥·žÝ¹ˆD¸B@BŠ1îÍ}Ú;çÚ$Ï Q;þòŠ–TÅÄÅÙÈ«#óÆk;€Tù˜~¾‡<è7^e>ä»Í–¹÷S|N*"œ§Û‡åt–½7'æ/`¸yÁc6]³‚ºsÞ |Í!eo´ÀäÑFñáaé6QؤCQ‚æ¿aÛìMEh§ñjòní·²Ø‚Ñ‹ô)¼#é(ÓBÁíØ4ÒÊÉ”*».××ZÆ/¾Y3¼™JrQívmNóñA5OØ{ao4&/0àþ»P‘•û®{š–÷à],j}È/T Çì§¡:’’sÆÀh476|nnÁtÜu‹ŒZ{Q$ûlÏÄÚÉYàÞ’å}£b–-Ž¿dîµR0YÒ.öû“üS(Æ0F Þ«5±Î/BMÖ¹MúëÃ7«…o¾©¤%"¸ŸëîéN÷§Š8#É™ihem“»uFcc=æPÛa¼ îÙ…+þjä=I“v¯™šl* f…¿>±6œa†@ u8w‘W=3Ú‘àm¦‰[Ô–?¿ìµÌ("1Ø\9xÕé’íÚ´—Vο¤¹îb–ÝB^"6óœ»Þ†°Ç¹çVÉ<ûJYÉPsÒŒ…²Š¦Äºëá;äpJu­fïJ¸a?k”ýDòÄï’ïæU45Ârã´X‘F:úA)ò Ö7d©2žá—5œ Wf•©¦œRêî!¾gîln1Ó®@EÇ)ïY/Ö¼´TseñÉwÞäÕ¸PxGÚ0F˜å,ÑŸKöVœVúëuèeëWO'ÒáQ˜±Ú€Übå·á-æV«ô`‹¼kl‹DôüT Ñzä–ÙtýÊÐc 0é·4Qœ¸4ÀGé7Œ Yëc¢g¡N¢ä[Žt|†è‘ô‚yÍ*̈1‹÷Ä~~9)À×Qcoo¹PVÑ4O9DÓkÉ(¾6¬X_6ó™§ëñùGÉŽvˆÍîÄYFÐì¿ÁÉi}•O/Û›…ýt)q!½£§‚,ÕaÉîW4|z\ô ÊDãP!д¾¦o_ð€˜ñÔ¶sIèÅååm£¹îé£<<”d*ÚùztÏcrª6u`ÿ‡§“yqdø«ñwn"u/)(«ÂR7U°C ~ +yX>H?á°8Yøy¡¨_:ߨNcÇ*=ÃÆÊf½±ºX»sÆŽfk£¿Ð1P-ùoÙ ÊŠˆFËfµWá”MÍÁQg4ðùëÐêùM­/ûÐ9ÛsÝ]m¥m€Õ‰!ãa ‡tE#¡T},'¶Ö3,¾µöCÝØ@‰ÜÞev·®õg§ãi4»ÆÖÛþäèªà „`@ºËp§Ëˆnª(z–™q;.âìž´ÛÁöè³CÅ{ ~mè¨PDÛ£ì" Üv‚Þ‡WÏ(¡z¨õýìI^´6 yßjß4¯¼8ɶ#E7v¯ÔsmYnëèŠ óngùNo›díûŒÛ©÷îg–Ö’;¬™8Dd䕦«M•fïàJ²¢ï¿¡§Çôù††Á)ú²cF€ÊgΞ‰NJ"¡çsMÓ65_¢ÉCUð+Ÿ]ˆ¼ó ÿ]y-zTRIËÄdä¨6Ù'e'Ü~]O ÷44Ћéiÿƒ&ð<õüوļwX>—ˆªÉ23-©–ô<åJäõ;ù¥ïÑër‘Ò4Zº@}B§CÃírÊÉ'"%«®¦©>qTûwÑñýT§Gœ»œœ_„^‘ÈÄ%¡¤5¯ƒ¼´$ò$õÚµ¨´üÒ €0ñ‰INÖšg¨ˆ>ÖºþY$GJL•iyغÃç’ÜÝÕi¾Õ÷üvPÖ8fćßY¦/Öi×ÑХݧµY|Ï=¸)t¤Ú<- õql½“ß=Tõ…–3ZޝݽÅ3wyÛ–wÑõ'òÙgÎÖS–‚Ó¢†°C Ò=ÊŸ%ÝN¿û åõ  Ù#ÀImô·ØÎ h6zJ"ák¬7ÜÏû+ÉÊÍ4;0Ñv{û¡‚5Šâ´=°ÃŠ‚)kl‚Ùºò­Ï«® 5¶A;ýí—3–}@¯Ê·ï^íú©ŒÅåÐÝtÙ˜»l„Ì“9ËŒ 
ÌVŸ&5‘Æ]xmî:ÿìŸ^S40-Ý–FSÏ:½ÉÐ5ŽzÔ#ôý:K»äÖçD†‡l°àŸÝï“åsÏltæ)I#¾âŽU¯¬{°Y[ÿ5Ò,ÏÀ„X»Ý÷Úœ²×ýggZ‰jŒ‘ºWZÛ£˜·&5÷Ôdo•Ÿ¨Êí¶»öìàÜ–¦U›û1Ü}ቻéºv‹V‡‡¸‘½>íL+s¥íϨJ3fŸ9àŠ$2Ê)ülää%¯1Å›t"ÏÙó ç $†9\8wñÉS7ÇÔ( 1ƒ¥»4FiënÍõº„ææõ;s …•$E*°û?ÓšÇE];¼çæËl±…Yõ{·úg0œÆJÌÊYñ®öÖ×Ý>t½ÝB +³ÕìåG‡iÕè!IÕúŸe`‚ ‚àa• …ùŸ¼]í½¼ü££œB3 EÅÛõ^±yÅðÑãzHpÚ;Ñ^“þ^§+PWZ¦È/¡%­Wû§k^â ×»WÝ|ï]=ÿφ…*‹6ï5šh(ýãú®ð V?wÈörVƒÙrªÝEkãì¾å¶±õþðuf× œ5ïÝi-ß ¬Êÿwu›Xv±pÁ<Ý9F+úÔèÍÓ“¶jŠÿ£©¤¤~<ž¡o†=;þ¤ì×ÁW¶Ñ9Ƚú苬EòÌÁ¥öé§"¡ÿŸò½”ºôÿûZumËå²tö=u^¥È|”½S8dr[sî'“z'soÎbÍí}XØo™ÂX‹œ•.·_¬Se÷‰,º•;///#7W ///W@´M‹ÿgaéunw]…‡s ctPxªÀ¨ÿ?},z==Wþ÷~¬”áö¨Â™Ë:ÊXÄügbý"¾Œh!þÏ£ÅåÆ?X>®KhÐfü×aúÛ›õÙT³¦HÛ‹Í›»fÀüÅJܾu÷q«öŽûÉo?­Ì7©øî̯Ò+m J\Õ öômÚÓ© '(Aïv¤¥úHõ‘?± Ê&Êå\U·uQÓñïç—кϴ½}¦Õò0—w‡ô?|}p/r«fÿò$ãã{î‡TýöÌR.öÀPxÀ¢¥=œ¹|TiçÒØ¾š{ÊI¥þʃ¹>YžëàMÅ‹¼\¯ÔËÔ]g¬ÈõµÍÍæ!ÿ]Ï®µÜ]$ZãÀ/Jku£Ý/7Yâüög¡I+c®…ÙÀ—HÍj±Î­Ø–ߢbÞs/r©ÿ„¡÷,Óž›ÌC¨¦!‚ ‚ ˆÚn®Ë¹}3­ ªO}9ßí´ÙÒ¿g™‡ñ³\}9MqÝ%ÿÿ+—·—s…y{¼šýã¦?.“™ºT_®CëÆš7þæ5m¹-¯×Ã3Â~ºIŽ@ å‚ ‚ ‚ ×WŒ¾kOÏ['góß‚«ÊuÞyï9¢M‰›gÚNë:÷ü?óSYlø×𸧼ÑlÕ⇭ž¬½ºt‘üû²ŸXî å^0hXáÌ‹›°|=7xs ôsÕó‡Íù3 rýz?ާºÞâÞ*Õ÷¥\4¡üOAAäúJ «uþó,!…óÿ-y¨ÛöŸgwÒêÓæL¢ØURà[ü3Ï[¶‡нÏ8Àý3ffÓù/Ï{v™jÇYaÒ£ï~땃·’æ¸M^D;.rݪٸ&†¸qó¶ÿ½orö‚ß’þ}[ ä%™é÷2/2ºè— Ô5ƒ›Vòœ"óv6¹6ó¿v†õÂÖ†úîff×ÈtÞ3™{F ‚ ‚ ‚ Èõý3,çÉØÛFR£¸'xë}n½÷¹ÒvÚå³näs,ÊN¹\8d²®Ä(Îx-Vi]XUâ®2GŸ-R­èS°&¿7Â"S;|žk8Í÷ÇU[çÚ@÷è1iã¿9zH³,´©[ÒÞ!Uø¬u;ÏÈÏí¶à¿E/öÈ‹î)}†etp$‚ ‚ âs}¬¯è0ò;%C^JˆŸ§§»§ÿ›è‰ÿ®mݽk¾êؽMƒ’ÑzäæŸ_ùºÞqqó ‹þø¯iÕ]U}ÀÈÑ#4ä%Šï'ÚBóà΃ìQ³³%IÿxOí'Ø9DðßÕõ$‰r¯VXtâÀ¯ÿv,ÌþÞFæ¿Õõ{ÍÝ¿kì_Â%¬ú‡vê“wíÒfVggÕ×úqàØ.£—ü®SÂU‰ *,ÌK>ksöƃGßJ|HhÝ]{âŒ9FÿÎ#¿hÆò+‡] JøÇ&еíØUe¼ž[öõ½¤lu®K‘›_X8?/)è¬í¹.¿ ²Õkdd #ÕL ìÖQÇ·yÿ<«q«ªÌg66[«aî·±±‘””9rd-(ÉÕ3Â,þþþÎÎÎfffBBÕwî›zõj^œcccOŸ>=vìXEEEjÌøÎ_UÓ’åää4dÈuuõj·šH‘j‘âû÷ïGÃ4iҤߟ”‚µ³hÿâ$¶µµMIIY½zu­/‰¿½ìܾ}ûéÓ§Ë–-kذ!5a|v}\þ¯YOõ±ø·¼¢h!«:ÿxÞ¿®Ô€åë”~O§®X_úÞÍFÌY1¢äU"SWj”¾§¤úôu¥7²M»\Þ­ôk–Tž³ÿJkm:NXwdB±;)ûz~zIÿœ·¹ÊœµøWªîz&ÝI&Û¼ys5Ìú«V­JHHÈÉÉÑÐÐÐÒҪџzF˜%--­iÓ¦mÚ´E9wîÅ™ þ#ÊnܸaggW¯=¬çx«aÉúòåË–-ÿ4$îîîgΜAÉ¢”âõêþþÍÌÌ|õêÕúõëëÖ­K‰RÓ«ñAƒ½ÿ¾.Ò”[uèëë '''ïÛ·²z•¹>‚¨iäææZXXàCzz:jŠÏŸ?WçÇP5šÎ;ão||üíÛ·Q#÷êÕ‹bÂÚ·oÏùܲeËÔÔTŠI­§   yóæœ¯mÛ¶ÍÈÈ¡ÈÔ °|øå:lذPLj4¦¦¦žžžÿÌ ^R»]ßïEWW×ÙÙ™ µ““„\Aü‡²²r~~>>äåå¡}3f < ……ïŒ9Žšýüýû÷éÓ§‡……QX*ϱcÇâââ8_ÓÒÒ† âîîN‘©Ý4n\tH3111¿:uêPpj:ƒŽg?gggÃ-$%%q›|¢fqãÆ Î'èÔÔÔãÇ/X°€"Ãw´´´8?‘äää@u }l×®E†\A\¹r…Û{$&&>}úÔÆÆfÞ¼y>âìì|çÎngÒ´iÓ¬\¹’‚S¾~ýºhÑ¢" ïß¿oaa±téRŠOmEFF†}Ä $N‹-’““)>5š 6xxxp/©_¿¾žžž§&òüùóqãÆq/©[·®™™¹>¾Ó¯_?___î% 4X²d \7‡\AÌž=»È’øøøåË—;ú‰âÃòóó‹´yÌè#›6m266F¥L!ª0;v,qùêÕ«555{öìI!ª}Œ3æíÛ·%®JIIA–ˆŽŽ¦(ÕPvíÚUÜÏ„‡‡wëÖBT³øüùsñ¶òòò¾|ùròäÉâ „¨0ÊÊÊ!!!ÅC}ÿþ} ¹>‚1bDFFFñåuëÖ2dȳgÏ(D|¡K—.P-Å—‹‰‰M›6íêÕ«¢ «H‡W¡©SWW‡à Ž^µŒ;wÞºu«Œ bbbTUU‹üàMÔ"""ôõõK[;uêÔ   ŠR ";;[RR²dÁ-$´fÍr}ü¢{÷îáááÅ—çççgee¹»»CÔQ”Èõ.QQQ...%®JKKKMM555577§@UccãÒžK¼ÿÞÓÓÓËËKSS“U^\]]ËVÿ™™™={ö|ùò%ŪÖíbfföÓÍüüüÆŽK/5Õ,ÒÓÓËx”———IâµQXX(..ÎŽP¢ÉÈÈ8vìšHŠU%‘‘‘)Mf€:uêlݺ• ¹>â¦ÿþe¬}÷î¡¡aïÞ½)V2ÅÊʪŒ ²³³gÍšõæÍŠU¹€b3fÌO7‹ŽŽ622º|ù2E¬ðáÇ¡C‡ò¸ñíÛ·¡&¡))n5Å!´nݺìm233/^A᪴hÑ¢xçÛ"ÍÛÍbUÚµk÷þýû26ÈÉÉñóó+(( ‘®Èõ(«V­úøñcÙÛäååéêêb3v&4¢¨ªª CДø†'øö훸¸øŽ;xy‚Ap(í¥ÙâRÒÅÅeÏž=k×®¥ Õt:uêT®ß¬¬¬¤¥¥Q×Qèª?Ý»wÏÊʪ[·.Ú26KHH8wîÜ´iÓ(bÕœqãÆ}ùò¥~ýúß¿/#Maü:´|ùrŠXÅðòòâLÈ6y(G%nicc³páBй>âO:XOO¯* üˆˆ011 æ,¡Yû*÷ÄqhùPöéÓgòäÉÙ X‚¿ÊÊÊ«ryàÀ>ˆˆˆˆŠŠ>þFïÞ½µ´´{¡ûUTTÔÕÕi~çÚÛ 944téÒ¥HâÌÌÌ‚‚VS 7jÔ¨Y³fíÚµÛ´i½5]³(Ò% É:tèP¤5Ì x||üÇ“““á" @áªþ888Y2a„èëë#AcccQu'%%¡ðÊÊÊR¸* *:8=ÎW??¿áÇKHHœ8qâîÝ»AAAh+?þœ’’B¯‘ë#þà¼.$¤¡¡Á½¤Aƒp 4ngÕÁþá+ÒµkW H%A Ѱq¾Nœ8QLLìÛ·o999ôx§#//ÏIçåå5mÚ´ÄÄĦM›Ö«WO€y¾wåÊ%%%ŠRM'22òùóç(ÎG¥ßkÁÁÁ°|ÐÇŽ£·‡ªˆ°°°aÆÁzxxÀø©««SLÈõDÉ´k×îË—/ôæwU ÙJqà;¯_¿VSS{ñâÅãdz³³………)&µ›-[¶Œ1ÂÇÇ'..®{÷î""")))†††È œšÎÒ¥K‡ òéÓ'²|µƒÙ³g÷ëׯiÓ¦dùª† ÚÚÚÂòQ@ÈõÄOhÑ¢ELLŒŒŒ …¢ŠÈÌÌüúõk¹º'¼Ûé+VÜ¿_]]ýäɓŧn'jÏž= WRRRUU…ëëÓ§ÏË—/±p̘1óæÍ³±±¡Õ\‚àÉõ½}û–\_Õáïïß¾}{ŠßùðáC~~~ÿþý?}ú´wïÞuëÖ‘ë«ÝLž<ÙÒÒrÛ¶m;vì¸víÚðáÃO:…tòäÉ‹/Ξ=;}útŠRM¤  ÀÐÐÐÑÑqèСNNNšN`` \ßÖ­[ííí{õêEá;ƒ 
êܹsaaáÊ•+iþCr}Á+0$/_¾ä}lt¢®ONNŽâÀw¼¼¼ºté"%%•••5pàÀœœœû÷ï<˜"S+ˆlӦ̀¢¢¢F%((رcÇFijjž8qÂÖÖvêÔ©222Ô­¥&2~üø¹sç:;;=šÆaªéäææBQøøø >üêÕ«þÀþ8bhhxæÌmmmŠ ¹>‚à(§èèhŠCÕB?vV~~~ì8¨­[·ŽŒŒ\±bÅÖ­[ÉõÕJÞ¾}»sçÎ>X[[4H€™ƒøû÷ï#FŒ¸råʹs猌ŒbccÛ·oöìYhMŠX bÇŽ)))»wïFA¾wï¤F“ŸŸ/%%uùòå7n(((¨ªªRLø[MMMÔr®®®¨úBCC%%%),äú¢tëÖíÎ;‡ªÃßßñâžãååÅÎÈ'##¸páB df]]] Nm¢°°P]]ÝÉÉIDDäôéÓ°ìò‚‚”¬þýû=ztâĉS§N}ÿþ½´´ô¡C‡ôõõ)n5{{{õׯ_ïÛ·¯gÏžð “]TQ---{õêehh˜@1á7n<þ|@@Àš5kð999™bB® Ê’’´2Å¡ŠHMMMLLTSS£Pð(þÐÐБ#G 0s=?þöïß¿jÕ*r}µŒ!C†ÀÒkjj"Å“’’F……‚‚‚È]»v•’’ºvíÚTTTΜ9óîÝ;(Îàà`Ž9$ª-&&&±±±ø ×çééI1©¹dddÀò:u 5°––ÖöíÛ5jDa©<ëׯ‡Hó÷÷Ÿ6mZ—.]®\¹B]rÈõDQUUE£K“7TW¯^íÛ·/Åï\¿~]^^žª¡wïÞGŘÀsçέY³fïÞ½¢ÚÁèÑ£7nÜ(ÀLÛ0þ|vù_ýõýûw|X¾|9’ÛÀÀ€6 &000ÐÔÔ5›¯¯/°Úâàà°téÒ„„!!!ccc==½nݺQXj(ááájjjOŸ>E\»vmãÆ—-[Fa©$Û¶m;vìØýû÷wîÜ9hР¦M›Ò#>r}Q¹ $©ôäÉ¡*¸}û6u¶®"ÉÈy¦7`ÀÎè—/_nÙ²åÌ™3»víJQªéL›6M\\üàÁƒøœ””äêêjggÇq}ùùùø0iÒ¤ 6|hР»»û¥K—Þ½{Ga©0™™™úúú ð{›6mZ·nÚµ€€€-ZPpÈõDeéÙ³çÇÉõU<°¶¶¦8ð({ös›6mDDDBCCååå!"ïÞ½‹Ìœ˜˜Hs=×h4víÚÅ~]¼x±‰‰ 'Máú ØÏ›7o^µj•ŸŸl^ll¬”””››öMNN>|xÆ ¯_¿Nñ¬>Œ;fý:tèÐ+W®ÐË&5”Aeeeö5ݰ°0ƒ¸¸8ŠLÅ€gž8qâœ9s\\\ðuïÞ½€ŠØ½{7‡\AðMMMgggŠß±··—••mݺ5…‚¿@#vêÔ‰{Ä^½z¡™„ëc?›™™ 0ÀÇLJbUC¥d·nÝV®\9oÞ}úó?bÄÔljjj_¾|±´´422zò䉴´4ù·p˜!,,Œã víÚ½{ñâE NÍ"99YIIiáÂ…œb"##UUUaùh.ÞA½„†, Õ×ãǘw;544êÖ­kooîÜ9 ¹>‚à?PW¢¢¢þþþ}úô¡hð‹ÔÔÔGݾ}›BÁ_Þ½{càááÁ½pâĉëÖ­ã^baa1cÆ ´©'Ož¤ Õˆ®®®)))0oœ…û÷ïoÒ¤ R³ˆëËËËã|íÙ³',’ûÔ©SìøFccã!C†¸»»ãë"†;wš™™! µk׎¢ýËøúõ+LR'&&†³ÐÁÁe3**ŠâS³7n\Ë–-‘p0'ì???]]ÝØØX”SŠÏOyóæÍܹs£££­­­oÞ¼É.trrš>}:ª,È ¹>‚¨Z´´´®^½J®lÙ²ÅÈȈº–ñõë×sräмysiié[·néééqž9s[Ž3†Ó²Õ–¸¸887›"=X .·oßž˜˜Xdû"®XZZvéÒ^ª”]rìØ±ààà¦M›>|øPQQK60¬\¹òÊ•+?æ~I˜¨"ÆŽûùóçW¯^q÷Ü»sçÎâÅ‹ÙÎ`DMaòäÉÏž=óññávw;wî´··GsÿRCçôéÓfff¨£Ž?þàÁva||üèÑ£¿ÿ~ãÆ Ÿ“\Aü"àO–,Y²oß> _(,,DÏ®€àIII°p)))ÅWAë_ºt‰Ûõ 0ï‚BåkhhШՙ)S¦4kÖìãÇŇô4hŒ\ƒ ~êú€———¬¬¬––œ»DYY¹ERRÒÊÊ þŸ]x€Õ¦­­-Ç%üÅÜÜüСCÞÞÞ222ÜËçÍ›ËG?ŠÕòóóÛ·oŸ‘‘‘ **ʽjðàÁ:u‚¤(•H``àš5k^¼xfÈÄÄdæÌ™œ†lÆŒˆÛÙ³gƒ‚‚(Päúâ—2tèÐÔÔÔçÏŸ+((P4*©©éèÑ£[µjE¡à/sçÎ]¹r%çÍ"nЦBšÀoùÉyçÎnnnÍ›7‡ £V+Ž9‚ò÷÷ïСCñµêêê‹/†²,¡å*îúPânܸ!''÷þý{ŽD~øðáCxx8RÿôéÓœßV3DFFvìØ±oß¾çÏŸ/1_iŠ”}øð!jÂ"«à´íííããã)JÕŸääd”Aø:Xô"¥ƒíËyŠNp@m¶yóf??¿Y³fmÚ´éþýûœU .D0mmm©÷¹>‚øèëë󯯡³]•!##ãøñã$køÚQ4¨¥ Ð"..®ªªjmm½`Á‚â?j@ÜwëÖíØ±cô`§špêÔ©Ù³gGGG/Y²¤Ä &MšÔ£GÎ=E¨W¯^q×'À¼¬É¥¢¢òôéSîåH}HX ‹–-[úøøtîÜ™]ÞµkW\CaaáÆQû™››s%Ê Âh``àââòñãÇ 6”ØÊÔ¯_?$$„bUͱ´´433Û¾}{xxxñµÃ‡¯S§Njj*ŠÍö¨Í>üåËccã+V°ó.pÖ¢ÚÙ±cš'DÕÓÓ“"F® ~?kÖ¬‘——'×WyŒŒŒ U7nL¡à/'Nâh­çöíÛ&&&rrrçÏŸÿøñcÙÃê§¥¥ÅÄÄüª¶9rdïÞ½(eÖÖÖeÿFéìì˃€†Ú®A}G}||6nÜøôéÓ9sæàCpp0ï?Ð@R+((?~›ÊÅo!>>~ß¾}.\’’Z²dÉÔ©SÇÏËŽ°ëÈíhÅPñÖ¬»NOOwvvvss{üøñ»wïÐjhhh >\KK«lÖ——çää„ ^WTTT[[{Ò¤Iššš}úô¡¼D® þ >|(##3`À€æÍ›S4ÊfÔ¨QmÛ¶%ËÇ_<== ¢¢¢*<¯ú“'OÚµk§¦¦†¿¼ïåúùóçÜÜ\h…ØÚÚŽ1‚’£^^^fffPŠË—/_½z5ʲG…¡òööæLW.DDD233Ë»< ÌD!´Ýþýûjì±ÁFÖÃÞlÞ¼¹Œ¹›6mÊý¶Xrr2L „ø§OŸ`á¦pöê“”¾ÈäH ˜Uvª1ráúõëóçχ¦ Q~=ph–––È]HGTY+W®<ÌÀãî¾¾¾ÈÌ»îÕ|2Ü êdTì¸TTÑ ªªª4hÐD†2v/((@›róæM¼¸¸8eeeì2Ž2¹>‚ø£ÙCý¨¤¤KñK ¬¬ì–-[h`wþbbbòüù󤤤ʤqãÆP ÈÃhãáʵo½zõØ·C?|øUñîÝ;kkë²'¨Ý@3;wjÖeñâÅð0~>š©©)¤*Rç§Ã”¨¨hÙÓ?”†°°p@@Àׯ_{öì‰û æñ1cÿþýYe\XXhcc³cÇŽ&Mš¬[·nÒ¤Ieì%&&V¤Ë››Û©S§îÞ½+..®«« ±þ‹_*¾uë–••Ôó€Ö¯_¿¡¼aŸ!t8Z%‹*Á;È<§OŸvuumݺ5rüöv†rÙ§½{÷ZXXhhh;v,44´úÜÝ›7o=zäççY¿~ýnݺÁ¡¡è <˜û‰z ºvrrrww÷÷÷ÏÊÊBáBÕ­¯¯?˜²A® J`РAhPç¾|ù’¢Q—iÓ¦ÁœHHHP4øÅçÏŸ{ôèqèС²çjãyyyggç:@üt€Aâzzz 0]DÆÿàÁƒU ÂO!pe°y숚k×®ÉP™æçç£È@Òyxx˜››WòòàꑬÞ½Q£FOŸ>Å%7.**ŠÍ'<î+((8ýjgg·gÏžÄÄÄéӧ÷jÕªì݇2p/A¾ºxñ"B‘‘¡®®>zôhˆT~+›––væÌ™K—.¡&×ÒÒ211Ñc¨ØÑà{gÏž™™‰ ¦)jªú§–{÷î]¹r‘åtttfÍšU<óðȹsçvîÜYXXˆ¼jÆðëï(==Å? 
E/<<<::ŰK—.¨óQÉÀ…vbफ़Ap|}}Q“ šÂÑrrrQv´µµa1P"ÈõD9˜f6¤$µ¤¤dDDDëÖ­+|qqq{{{öó©S§6n܈cB?Õšá®]»{6þ|CCC~ éäååedd4lذóçÏó+qáú¾~ýZÙæ_HÈÑÑ‘ÕÄË–-[¹re‰ãø• ÷Ûe Ë—/‡Åƒ \°`A‘y JdC}ŒKrrrzüø1-²1´>Ü ´´tÙ‡BLà”HGèñãÇÃ*,e¨L ,--·mÛ¦  pâĉÀÀ@ª¬ø,ôõë×ïÞ½ÚµkW¸Ô‡: ;`FF†………••UÆ ‘«Ù®ªîúóòòà¾BBB^¼xGõñãÇ-Z ÊÉÉÁ’¡VAþ)—kE)xôèr2Œ"‚;‚ETQQ»ÃAú3PÎ!Èõ߀VC­º;,,Œj5kÖ@Qñ"æÙ¹sçßÿííí]ƒÊËËGGGC.œ>}š/ôf10ƒ€C ;v¬mÛ¶ëׯ¯YýC //^¼èêê*"";1oÞ¼Ÿö)/ïÞ½CL²²²ØÉŽù{ý°U…ü:«‰q©“'OvssƒV®ØXV’’’Üã—ÂîÛ··Ÿ’’2räÈÙ³g—=‚(·§-.Ó QF\\\ØI&¡æaÿúë¯÷ïßúô©OŸ>HÇI“&Ía¨|L‡uëÖ}øðžÁØØ˜žð…ÌÌÌÛ·oß»w~Ù£GÔÓÓƒ±‡5B}R™ƒÃðß#o ³!íxyµì †‹{þü9þ¾|ùu);u**½Ž;¢^íÖ­îBII©7C¹Ëš:\sdd$;h3jleeeUUUÄD—² A® ~¨ÇQËËÊÊBšÿÉØÌÌÌlll}z~~þ©S§ªî¹Ò‘¿ÇlРÛŸ399‰µ}ûvž &p'g‰§§ç… `¶ tttÆ?tèП>ÿdÇ wwwøð!®}çv̘1œMë°öñãÇð«0ÛíÚµÃ6Í0™ã<ŽF‹$;{ö¬¹¹yNNβeËpãôX¯Â„……¡±@Šøùù¡ò5RSSc»–g¨ü)+Ο?æÌx'X£ L`øéŽyyy¯_¿†…{óæÍÛ·o‘aPEÄÇÇ#HHH ó îêÔ©r.5F/† \^pp0kçØ§:t`_éìÙ³gŸ>}:0”Ý3– ÈõÄ/ETTMŠ+PSÔâùˆ‹q6þü»wï¢ñÞ±ce¾ðõëWèˆ77·Ý»wÿš“Þºu "LLL š›ïcsCXs?+c_¯‚íAÁÑÖÖž9sæ/x4;;ÛÃÃãÞ½{ÞÞÞtRRRZZZ£G8p /ó2W2Aalmm‡ †þüyUß,ä)Ÿõ™ÞŒ5ópò»víRTT„BXÉ#‘²ÛÎÎîÎ;ÐǸ©Ö­[£ÎIHH€h–——‡mCH±KÙcÐË2”hP“’’à}}}ƒ‚‚ ¿‘RÐñÝ»wǽôîÝûÇçÎC¹À)LMMÙé ©‚â1Ïûøø tlhh(Ò UQQéß¿ÿ!Cº3˜˜˜ðëtqqqÈ'NNNOŸ>EÆ@U3mÚ´e Hbø7Ø*OOOTªØùV3--­I“&ÈQmÛ¶EmбcÇÎ;#Ÿào7†Š] ŽÌ¾Æ‰ì÷LjӉˆˆà,8²œœœ‚‚‚’’¬c…{!¹>‚øÍË{ÆæÍ›ë30s ¢6Ã-8;;ãÖàOÆŽÛ®];ø\ÌÊ•+á²³³‘£PÏã\HYÈ÷Ÿv&¬•äççÃÛ°‘(hH;öýC4 ýúõCªi3ðëŒyyyïÞ½ƒmcýܾâ2êÕ«‡UuêÔG4hüJªP\O§Nš3¨ªªV¦,¿|ù÷÷ À©qjIIÉöíÛãDptÈ«=zôhÓ¦ .Ζš3‚\AÔf5j„ö $$ÍO%ß}ª¶ÀÙNŸ>²ìÊ•+h)Ñù‚››Û´iÓ KNŸ>ý ÄzÙ°/ݼ~ýúáÇ¿¦‹¦çkaa¡»»»½½ý½{÷¾}û%+8jÔ(ÎX£ñññšt'T‚‚´¦¦¦&ÂÈÑñ¿˜ÐÐÐ-[¶àÂ`B`ž×1ü–D„EF*((øeM5iÒä kÞvíÚeee%""o?oÞ<ÎËlW%¨öÈÈÈôôtXö•KX…! eŸ«E‹%>vCõ‹àÃí"‡ÈÊÊâ˜p¡HŽâ?a *»páò.nüøñS§N5fàÞL¡ì_‚ƒƒÙž]0pIIIÐýœn]È™ÊÊÊütô׃Ji†›Bª¡uƒ·aUávTTTz2”ë°iiiïß¿‡sCêàŒÓ‡>3¤¤¤ Ep¤ÒDFyOHH@Cƒ2Þ¸qcvJí¹sçVæîP4^3¼}ûwŠ»Ãõ|üøU Î.!!çoœ /‡DÄWàf ‚\AÔ~Ð6@ ¥Dûqøðá*ì—‘™™ éæêêŠ;rpp Tæ ¶¶¶k×®…¥>†ú©V×víÚ5üíÓ§$ì£G ƒ~åÙÛ·oÓòìÙ³û æææ…†Ðjƒ0x#GŽ„ÍÀÂß%èÅ}ûöÙÙÙA¡¢€À‡°q«@:ÃTÅ@¥‘ŸŸÇ…$ƒ[€ç„²?räÈÕ«WQ%"Õºwï¾bÅ #†ª¸† YŽ+9qâÄùóç_¼xGŠ%-[¶TUUE:tè’’ReN*..ÎË«z°:¡¡¡p†0°1ø Ï#,,ܶmÛ:°OŠà©zôèQ¥þ9qˆˆˆÀeÀó$''·nÝš}¿‘3ôˆwÿºÂÂBÔQpG°a¸ww÷OŸ>Á)¡~@Ⲟíû÷ï¸x4|Í›7g›JG;†& ÅãÃxß»wv=((Ι5ÏðáÃa/˾ììlöíMÖ¼ÅÅÅá X\U^^ûô팇úq†CÁåU`l‚ ×GDÉÈÊÊ¢5}úô)ÔY³fíÚµ«&ÞÑ9sæ@%=z”íÉCThš•+W˜˜˜lÙ²…/à Vþþþø «¿mÛ¶‹/Vxœôâ@)>yò„}æ {™á«  Ð«W/555¶/ÖO'§bÇâwuuõöö†ÚëÙ³çàÁƒuuu«hjoø”ØàéÓ§ÃéýÍP ÓŽ¿®òõ‹{èy¨ghh¤š¼¼<¬‚ŠŠ /bÚ××VÐÙÙ6uãŒ3*6]di£|ÁE _ÁÆÀ<Îe(q{ÜÈ£GpUØ·[ˆ STTÄôïß¿o–;2/#.ÂÂ3s&»•““Ãö@ƒ=CÌQ4`ùzõ걥邽ؾdHw˜ŸÜÜ\äRœž™v«aÆ™™™¸_öñ|"…0r°m8‚Àñl0ưL8©äT¦5ÿG¨QêaÒpa(¤0Þ0ÌÈ<Íš5ëÛ·/çÉ ½½=.ͨü®ƒ;BX _Š[“g –… ÈõÄo”(š®îÝ»C~]¿~½FŒõõ)VPPpúôé»wïR:VÈ)ˆNè°cÇŽÙ2Ô ‹gûnÁB"cð2L+¤³ŸŸû’”4Ä«¨¨(Ät=”••ûõë‡1’¡’×Ö¨Q£‡æÃIÝÝÝ¡w!ß¡þõôôÊûÂ*Ž`eeåææõ‹>iÒ¤ Õ?Õ ‘¡¡yü%‚N044f‰©-!!Á¾ÑÇŽ(ˆÛïÃPù Seà|…Ù€m†ÄGaß±„åàñPÈ]ì4n0<¸Tø}}}x ä1ÞhC®6`(q-jo8”ߨÈÔÔTÔäˆ ‚+,,Äí°SSKAäú¢fÃNè———§­­žžîííÍþ@[Ýððð˜={6ã .ü‚aÿ,--7mÚ4hÐ kkë/^Ôè{âÿøñ#20T5¼ÖáÇ9¯óAáAÕqLššš’’’ÃïºZv¾µk×r/„&¾yóæ;w>|˜˜˜ˆK…O3f ÷+g®®®G…Ї0511ÑÕÕ­¡3  ÃõeffÂÑq¦‡†£ƒoiÓ¦¬¬,â/ßKP|&ô_´¾û†sÛ¶mgÏž……˜Œ‹‹C)ÀeHJJJIIq†$Áyá!‘UbØ×&?|ø€»À-ÀÂá:7nÌÚ<ÜŠîåEOO¹¨Š³A® j uëÖuwwgׄXñôôäï{MÏÐÐP\\ÜÁÁR€Rª’ ‰wíÚµoß¾E‹mݺµæNß ‡\úäÉ(ZˆEä[999˜SSS(<ü …äeõ_R{ ÷Â7oÞ@­¢l åçç£lÂéYYYAWÿ;úúõ«¿¿?çEÙ¨¨(Ö~'%%¥¤¤ Úùéx$Õ 8ŠM›6éèè\¼x5Ò‰'`_aœš6mŠ¢äìì wTugg§…`Çaû°Áê°ïk°†íÛ·o 6ÄŰoB²¯A—Μ9SBBÛ¼zõ E…#ÔaÈÉÉ…£?~}îܹünU¼7;Ð>Ã{ÀHÀö¨««Ã„W¬$œÌ—/_ªgZDFFîÛ·CNNÎÈÈŽz+÷6H,vxFÎC*dc˜·ÌÌÌ›7o>xð€}ÃÃÐÐ;tè¿G§ÈPÞ«B?þÜÇÇ•3|lÒÆI^^žõØ=zôÐa¨|à9¹ËÝçÏŸÙ¹V¬X{<ïš——×°aCd6vhÜ ÌL¦CeÎŽž?þêÕ«¡¡¡¸;Ü\t‘ÙSxˆBBì|ë?ÍÞ¬UfçÀW”/Üؾ}û"·×ˆïA® ˆ ‚Æ~Þ¼y»wï†Ôø :"rÿþýÕmž€ dúرc!ÔªÏUùúúÞ¸qÃÃÃ"RZZ~LWWoßOW¯^=Vggg111???ˆ¹ê èæM›6YYY!,ÛéýÔâQüñññ(¹ìãzÈ÷åË—s¦/§OŸÂ:®CæöêÕ ©3dȈø9 |¼÷-Z¤¤¤T“„HKKƒ©»páä>´¾¸¸8ª –-["‹®]»öÀðºRRR¬sC¾Å_vþ^ŽúN.×ÜÜœ{¾³Š-’yù6++«[·nðTšššð<¬]äãT«0¥Hköý[ØWÔùìˆ&²²²;^ޤ¤diÏlYpµ×¯_ß²e êp\¡¶¶6Ü2e ×pêÔ)ì‡;E|Øì ~YKÆWFö@­¿‹LLLlß¾=‚‚DA›Ež 
ÈõDmÀÆÆ†ØÊM]¶dذa7n¬>B°FóîÝ;蘣G&%%ýÆËÈÈȸ|ùòµk× z¡Y-XdŒÄ_È# ¹¡Òà‹vìØñ{S';;ÛÄÄäæÍ›p»ªâ,mÚ´ÙÃÀ~MHHX¶lŒM§NÖ­[W¿~}xo¸ Øeeez===ö)ͯ™ºôÛ·oUý ÑÙ Þ¼yÃŽ¹¦Y³fL¾b9œì g÷ª ÿþAAAlÒÃ<;88 ®Ã5Ü»wV?66‰‚B1hР¡C‡òkœ¡ÂÂÂ/^À¨°C¹À¥§§Ã»²ß!¡‘î]ºtáeÞùŸÒ—ž–³$??9ÜÎÎÎÍÍ ;vì¼yóXˆûE¨±ªiÓ¦0x ,XÅPm«Ó&Mš g(q-*”#ooo„:<<\XX¾)ލ"&¿¦{$Aë#‚?ôìÙ3..NZZí4ßGÒƒ ‚‡¨¶/Ö,bbb 5ÌÌÌ ù~ñ© nݺuêÔ©GÉÉÉéëëO:•ïψ*Œ   äïêÕ«‘‡¡Ò~Ë5°S nذáWN‘}þüyHpè~¶C”¼¼üýû÷=<<êÖ­{îÜ9(ò_ qqqvFò ðùóg$%§¿ò<¼\óæÍÛ·oÏ™,÷ˆÏ¬á΢ûà¸öîÝûÓîd|a¿zõêÝ»wƒƒƒq…ð'NœÐÔÔ\¹r%ÑÅ‹+ìñ233Ÿ‚¨¦ˆŠŠ¢Ñ’’’:zô(wo¢Jõ³uëÖ°°0~u@úÃ5jTýúõ!þ~Íé²²²Nž< ÷òþý{ˆžyóæc¨Î!‚âÇC~Á0ü²“ÆÇÇkkkÃiØÛÛ/[¶¬JÏG„¼rå ¼ÐСC ¡¼Í¸7ƒ#Bz 0Ïd8°{÷n555”îª{ž_11±ôôô¯ ¦.222::º^½zìÈH5x9×:h3ðx.Ø Ü¬¿¿?ns-CÝîÏÁÁÁÛÛ >ŽZCCc=C‘á·ñ7""6µG—/_.qÊÎÀ* 6†¹UVVVQQAzÁ;•ýâåoäË—/Û¶mÃ=âö7lØPâ›^^^–––...ØfÕªU¿krŽªCRRrC‘åïÞ½srrrwwG²6hÐVp8Jäú¢ZP§N(0H™Æó¥yîÛ·/{ŠmåyöìÙ€>|XÕ9:;;Ã8…„„Lœ8Æ„¡fÅjΜ9uëÖíÒ¥Ë/ººò·ê¦CDZ9räÖ­[°FÓ¦M›>}ú:^Q!¡• ¬éß¿?<|cIðŒŒ _ø:|iÙ²e÷îÝÙw{÷î 4‚¡òç‚_222Š={ö¬££#ßïåëׯ.\°³³Ã½àÊ P(f3ð~99¹W¯^,X°‰ˆ8„††¢²íÓ§|RG®ð€F¿˜¼¼tèÐÓ§Oó}¤ÊwïÞ™››_ºt †aþüùð8 _ŽŒúøø°~IWWÅgáe n ß \aíž={ ;ª  Ð«W¯¾}ûâ/çñrÌp‰ÓT\À²eË`Æ ¡=<<øxäçÏŸ=zÔÁÁ¡U«V¬Á^ÈP®ƒ ãÁݹ¸¸ Ô(\¨aíP¦N0 A>|… Õ”Ò×:gθkX¾ŸjòŒîtö+‚³qãFdU«V­X±¢fͼR1š7o^dD«ÂÂBT#ÈÉh+5j¤¯¯?wîÜj5Aë#ˆÚ „š““är\\\ÅŽ¡¡Ð¢E ŠgåÑÒÒ‚’ö÷÷çïaÏœ9Õ%))¹eË–¥ µ,n+W®ŒŒŒ„Zå{ÿºððpMMÍ#GŽð±kå;w¶mÛƒ †>ÊPuÁ«Á˜>iŠŠŠ7¾y󦸸8÷6ÁÁÁ÷îÝ{ôè>äääÈËË#¢h0€Ç{„……+ܵ¯D`2aŸFý7CåÁNá ¤¤4kÖ,###wOOO¿~ýú7%hz¶O—¶¶vÏ­LMMp˜çêü껟Ÿß”)S`Ú˜|þ¼ÿþÇ“¼nݺ¿þúËÊʪ¼“7Ôh‹Œµ“››{‘9 !‚E¬}# ‚\ATS h'L˜0yòd´= #GŽŒ¡HVž.]º@ –ë²2ˆ7oÞÓ§Oׯ_obb2cƌڽ'N@Í_ºt‰_¯Ïååå±Ã‡ðeèÔÛ·o#q³³³W¯^taøõ!’––¶°°pvvîÝ»wtt´„„¬ je†Êô‘«_¿>¿†ñ„u_³fÍÙ³gQ·T2a-<($$„˜/Z´è/ûFEE>}/!!AMMm̘1†††3Ê{æææ{÷î…FÀ¯\¹R­JÍ›7oÆŽ‹ çõë×¿àŒœ©)“““Q)9::¢‚ªÎc~VõêÕ›ÌÀYâééiiiéææ¦¢¢‚˜ðe¶F‚ ×GÄìß¿_FFÆÝݽ\C~‡‡‡£M‚v$ËÇ:wî¼eËnP1îß¿+..t÷îÝ?*†...pÎP±•GŠbôùóç•yˆ ç§°lÙ2ؘ Ì~^a¯_¿Ž àçç'Àt»6lØèÑ£2°ƒòGDD`¡°°0_fV„„ÍÌ̬äA.^¼¸téR ¿Iûõë×íÛ·Ã7öèÑcóæÍK~ºŒ=Ê ,*4ØàI“&éëëï`àKr zyy…„„°¯W”÷=[¾SPP0wîÜ;wî\¸p¡ê:©– BqæÌæµóµk×?~I¿uëÖ?¹`‹'û¹°°ðòåˇzûö-2äÆé…‚\A|-îüùó!RyÜ>--MMM-00†ëä ½zõ‚6­Œåóññ™2eŠ””t3ïéXË€pD*|ˆ­>}ú¨««øð¡bG€Ý‚ÍCZØØØ¸ººVõ]Ãçà~oÞ¼ùðáÃfÍš 0înĈÅG˜(‚œœ\\\\zzº‚‚BçÎqÙ•¹ŒºuëVæ Ï“'Ošššâ*ðdIvôèÑ]»v!æû÷ï7g({—ÐÐP¶S¼ j¿ršS1zö왜œ¬££Ó´iSúßRF`õuuuQáàªÉ|BBBì\”°Hä8ðß2IµBPPЈýúæÍ›Å‹»¸¸Œ3fûöí¿lx^‚ ×Gµ¡C‡vìØÑ‚Ç_ªªªÖÖÖ:u¢ÐUMMM´èØ÷óçÏG)))wîÜ‰ŽŽ¦`²zÈÛÛ»bc ÅÆÆ***>xð½¼ûž>}zÕªUúúú(Gø[E7Ÿvîܹ7n¼|ùîÔ{äŒòÒ¸qãçÏŸÃ@e.X°ÀÊʪbÇ©W¯^vvvv„Cþ?{ölyŸïÅÄÄà‚ƒƒƒ÷íÛ·˜¡Œãããa/\¸Ð¶mÛ%K–ÀégøõYÂÝÞÞ^RRòõë×¢¢¢¿ì¼nnn&L€×}ûöm5•€BBlOÈÄÄD…fÍš9::Ò‹,hmíììØÏˆÏ¤I“œçλuëVz݆ ×GDù8r䈖–/®oÖ¬YØ‚‚VyvïÞžž~ðàÁòîø÷ßoÙ²B‡ÂÈÍÞ½{—/_PÞ¡ÅW®\ùéÓ§gZ+ øí™3gjkkÃ/U ÓWÙdddÀÁ«„……õíÛRånO$&&VXXÓÛªU«;wV`*ðúõëçåå•kœ7¥¤¤ÏÉû^ðäˆ3ÌÛO¥^ºt ™áóçÏ3fÌX±bÅ!†êEǯ¡¡Ñ®]»û÷ïWà÷…òW¯¨¨xóæÍrÅù7Òºuk\3>ìØ±ãðáÃW¯^­}³ÿU2>ÈÛìgd!äíÌÌÌãÇWÏÉ ‚\AT;º1;vÌØØ¸ŒÍÜÝÝ=<~ü·²²âã´¿3x{111ssó üQëÓCQ`¦Pš8q"ê½-[¶PÓ@ë#â'¬ZµjõêÕeKL¯ßÕ#¥ö¡§§ ËãìUÉÉÉššš=zôpqq¡Ñ½Ëfùòå»wïæÝõíÚµëÎ;¬xú)öööóçÏ?xðà“'O*©ð™0œ—.]jÙ²%<Þ¬Y³3üÆè-Z´hÞ¼yJJJ¼®l ÓÅãÆ†††ß¿çåÑSbbâ˜1c²²²nß¾]šÙƒI^¿~}ƒ 7Î$õÕaaá>Àô~ûö z¿OKKSVVž2eJMy¾WÚÚÚ¸ 8XÔ{666sçÎ¥*®8âââì+ ùùù¦¦¦'Nœ@È—š‚\AÔBFŒabb‚ÆUMM­Ä 6lØ€U4;_ؼy³¼¼ü¨Q£~ºeAAAÇŽJqã…… š™™Á$HIIýtc˜.'''^,ÜÌ™3ÝÝÝïÞ½›’’Rák+,,<{öì¡C‡àgp@è³ßnóJ´paaa–––`DD/c¢Ö«Wró§›eddÈÊÊ9rä§]a8-,,Ž=êëë[|-Ö488xõêÕ°y5ô…sؘ·oߢt#zãÆã×açÏŸY›æCGÓƒ²ããã#&&vòäIî¥T%-$Äh„²6iÒ$3gÎÀ9Sdr}Aü€M‰®/77Z­ÚŽP³ÈÌÌ„Ùx÷îÝOýž––all,=ß+oN†³‚m({³Û·o#W#¼eû4íÕ«WÞÞÞ["++ " &JZZzÆ 3ª -Z±(!!ñðáCEEÅŸjÍœœœ²·‰ŠŠRQQ èܹsiÛ¤§§ëèè ¶qssÛ¼ys‘µžžž°4 6‚ þÅØØ¸wïÞ%®Z³f‘‘MÄ/=½`Á11±2¶AÀoݺeCítÅ" óP¶ë‹‰‰™2eJÙSÞ=yòF¥qãÆåº€üüü rrr»víÚÌP㱘ššŠ[X²dÉÂ… ËØ²nݺe?ëóòò‚yþðáCiOß¼y3`ÀT2>>>EV988 ¼`­••ÕË—/kY^E@ž>} ·רQ£ gçÎvvvŸ?®õ¿-gÐÒÒ‚ë£?å—ÎÇÌÌÌÚÚÍJi¯ó¹>‚øƒ’’‚¹ÿ~‘ÑÀ mmm_½zE!ª•ÇÌÌlúôéð$%®]¸p!Dpyç.#Š£««{åÊ•Ò\žž^‰Ý]55µ«W¯&&&òr¢ÂÂBh)”‘ 
[binary data omitted: PNG image payload]
nova-13.1.4/doc/source/images/filteringWorkflow1.png
[binary data omitted: tar member header and PNG image payload for filteringWorkflow1.png]
Å;¿Ú¹9ÐM’ý4uþsõÛò:d5DËÖ?DÈndIÂéë cKS6î+ÉÈŠe‘y»ÙìšoÁ.eßST$ ›~jÓ@Rœ¨·3IvP\óu +žÅ6’¤ (Ž%ÍüVÕŒâÊÂU9ÚÇÄ´mÎ^¸ÀøÄú˜Â©É [:; ¿ü¨sÛ¶ÉÅ:XIR)*x½r.ô–t|¡|¡;þIq,êâÎU‘pûÛ¨ñ·-z먎BŠËYF+Bq%ÔuÔ¡9¤•^Ø…’û/k7û8uî,þÂB6²ÌNNÒR7ÇæööröLÃ@Ws»¹š?`Te®_u²§nÒ*½ýNBe³øT™s'¼8ÊãŽ÷!þ¿¿¨ç㪠s'ý"Å·¶Ì.„“–ê¬/ßÝ®’ʤïLD— ¯/QÔ¾à$-]ÅT5W¯–+½D-é— ‡Ëµæ‰¦ßœ:W“u JÊMZwÔQTé_µob °äŽ®š†µÚ£Td›‡Œa–%PoùŠnòÈ3#Xu)$ÅæùW©8åãÚ©(Á7÷ޱ¥5‰,Á3/Ž:‘m”†nòäW‡èÚ6‹G—øê7‡øø¢“ùüú VÏòÏ~§“§}ŒN)”µL±ÿ¥6µ¤—°*Gbf¦Õ±<ާªéd2Æ=€‰-”¼a]1P²½„ª¦ªÕ»ˆ%齕¼Ü˶óŒÉJ€%J¸Ô¤eGÅ•ÁUü&ùÎÉ9LL[‚Õì,%)6{ž»óÒuX<úüèMüâ1Øñè;y†3”bïÔ¢'­íá Ú^Ü›*®ólm|ÙܵɩFfÓ/ªÞ´<ÀDU1ç‰|wÆ$oX—ST5…/8MK×ê‚’ÀD䇸-÷œgL– (J”Â’ -Ûk)© ­þ7ÊuX}‡À²²Á´¼ê-9(‰Å™Ë¼L¨úëøÃËÃìgûÝ£\ø~’¸òòŪª§)ðMѼ­ˆªæªÕ?vKüš—[€‰äµgõG#ÎÐÒÕ@iM$JB&¹ wy‡zÉe|¼™¤ýM «_À[XµŒj$-ŒFQïÿóß2‰¦gp{Mš¶RÕ\™#€XÊÕ¥ôÜòŒÉ²‰¢D(ª4nm¢´¶0GÔGʆsòÛ³ºuÁ¹ÊëÞRÉØX)¾MqýsÊWìRïÄ$"‹ï‚3!l“Ù©i,!áöù1âÓ(..Ç—k‚655ÅÅ‹©®®¦¸¸u‘A} ÁêžÀŹ {p¹’–02ZÎm”6îÇí+]qE²çÁ‰z7Ô’ï´Á<¾{„r&''ù·ÿöß222ÂÓO?ÍO:::(,,¼'Hqx‹¨© sáÒUÒû;pXsôõF)oÙÌÜÄ(“ |%N†{¯‘vWP]SÈdï>xï(ãÓ lKÂWÞÀcŸ¦4 sýÌQ_tòò¯>K²ÿ4ï½y˜C"PTŠÊ$Óf=ßúÆfF/žä퉌Ŝaö¾ürïi>9}WÌÆöRï›æÐû}<ùÒ Lœù'¯)Ô”:˜Ÿbv.CÓîìÞÕŠl%9óîë||ê*ŠÓCqq€©Ø•[Ÿã±=Í|™VY'Nœà;ßùÍÍÍ<öØcìÝ»—M›6QXXˆ¦i k¾ÔAIMVúF’6ÆÓ² Š¬Ñœ«3åFO ;€‰mÛ+˜üzo±-HÌ,cƒy t—‰«@]ñ¾c’Éd¸páï¿ÿ>¯½ömmmìÝ»—gžy†]»v …¾ÔE,kBEa‚ÎÛDídŒ³G1ëÝξÍÙç¥F¯q¾û]º»Qß}éÀj|Gß|‡Þ¡ ,Kà.,ç‘瞣ʗáÈk?etÚ‰bÎPܱ—mµ6ï½u”ØtÖãñ•Õ³çÙ§) ¹¾48‘e™wÞy‡?ýÓ?%PYYISS»ví¢¥¥…-[¶PXXˆaäÆ?Ó°ÇÈ$6„úÛFÑœ”Ö–£¬dƒÈ,er‡ÍÌÌ066ÆÈȇ¢´´”M›6±ÿ~{ì1:;;±­;IsQÝÐÀÑ7¯06cRbŒ1KP¿wÃGÞa`d‚šP˜Á+W 5tR¦yï×™rµðä7w£Î ñË×þ‘#ïòµ¯>‚šb|‹•œàÄ»o3ënçÙƒ8Ãû?{ÉPYVŒYæ$ƒ‡ž<ÀÃŽï¿úcNœ¸Ê7öµQ][IhÛ£lïj'Ùó.ãÑ +£û¢MëC/²ëÉçßú1ù„¶ö´ØŽ>Aóþ¯Óµ¹’øèÝ˶=}_ŽÁµ|ÿý÷)**¢££ƒ½{÷²wï^ººº²#$¤¥ÍŽTuªîË£ïQ# æsµîÚÇdµSˆl /M1v=†m­ÿª[ØÌÎŒSV_À¦=[Vœ©¥îÙ IDAT1ÉétzÑ·“É$Édöb7 ƒ3gÎpæÌ¾÷½ïñÐCñÄOðäd2¾Ð`W!°, Ó´oó:æ&F™´Ó0ÿ$gi=~ÑBYŸ“‡öÄÍÙü!§¯JìÿÚ‹”{œxóUÞ}ÍÁË/ïf6rëÑrž~þqêª8÷OΈUÉ ßúJ|„Ó'.2<¥$X}OMA&“¹cMEÁ¶mb±±XŒ3gÎðꫯâñx¨¯¯§©©‰òêj¾úë¿¶ªçÉ2múÎ19äÂ¡× ËKÀ ‘ULD6/é+!‰ì9B^®Õ4SŒÙÄ*ÔVРÂÀ@?ãÃÃ8ç§ãÚ¶Í¥K—n+£££ŒŽŽòÎ;ïPRR®]»¨kjdÿ /|FÊêq¥?f¨/Š*z™³ ©o¬Å¼êe¸g€T­Moÿ4õëIG‡¸ruœêý{°âSXBÇëvqåbñƒ7Û„&§cŒŒÎQÿµmTW–"J¼\;þ!“7Ž¸ì¢¼±ƒ¶öT3NE…—Ó£“Hî:TIÂYàÅãÒI~†-*.+bë¶F T‰ÊÚ~yaŒL&ÍLÿ ³j˜¶]Û yUºvnæô'ïâÄŽ~x„«×uŠ;vÛZF£Q:Ä¡C‡…BlÛ¶–ÖVûÊóy¶r ;æYÆä.ɯ«]r% ^gìšÇÝ‚ÓY´¾A‰m3:zŠÙ™i¼Ó+ïݦR)¿ù&o¼öÚ]/éîîî;^Çã?~œ3gÎðƒü€'Ÿ{–_ù½ßý¼ÝeìÂq~ôïÇnÉè—¨ìÚǶ.zZ}~’¤á/ !â1._¸Š¯æ1Š<`â&\RÁ™cW‰Íu!I:%µõlÞÑf§PN梳 LÐÐÐÀS/·#«úçÒÈÝ/òþë?¿ÃH=ztÑ¿w»ÝÌÌÌàv»Õmô$„ ÷|“ƒ.üþ6åór,-r…T:Š«ï$ò̦$°æ'p«–@V$+6a·¢”#<…+vA˜¦ÁÄÄqff4¼ǪÜKN§“ׇ{~N”mÛ¸Ýî;Jñý~?.—‹ÊÊJÂá0–i-’´)á*ª¥¦HæÚµk¨VΊ*‚ÁBª+K¹|®‡Ážf{êª0æÎ2Ðöc¦.g÷Ä6¡¨*DÊ·0hÒ†„»`¾‚GÖ†0ru«¸ÝÞl%$¡h*Âþü†êô£Î¯¹¬* ,la“JesNœóCc´‚n¿ã®·’Çí&/ L<Ïkét:ñù| áTÃ0:‡® $FWFØ-/@…Œ]¹|A’éN`"IRvìô*Öä^Š2v5Ç]Ó^÷ $ù”±±A„(FWü;¸œN>]Û}¿··—W_}•‹/¢ª*^¯MÓ¨««£µµ•íÛ·SߨÈL*ùB€Áš&¶x¿~óoáJÜúç÷þ°Ì4ÓãÓD2gx}ô|VYloQ†i¬àrûQ$;ž‘Äëosê¿ç˜­RÚÐÆ®ÏP[ZpÏÏikkã[Ÿ§¸¸ø¶× 8}ú4™LMÓ(**"sðàA¶mÛÆSO=ÅõÁ¦W±B ÷|/±~@ ã@‰ŸÄ9‰2ð þî÷©‰ ŽFq%“èé4ŽL!I¤2ºÎœ×ËDaãÛ˜lÛ\¶¹¸¤åëébY&‘È)¢Ñ(B”¯Šþ ¨¸˜G¶uẘüä'? 
àp8¨««£««‹íÛ·³uëVÚÛÛ¹64H_äÎÙ*²î¥¶¥šw/œÅV')ß±§®®«E|pˆs§çÐÃuT»† Y9Û¿òmiφe,#…%ë84™ÉBÓÐUA*‘ž×ƒX,ý¥~çÝ9£;/§CCXi+û?͹)’Ówÿ¼ÍÛ¶²µ£3{ÿ,ÂÚÞ(n·{!”³k×.:;;Ù¹s'ÉTŠÓׯ®XƒP‰SC@òéôúÏÊ4M›©iÕí;)?²,Œ„4A&«‰K.EíIãq7lPr†ÑÑl»IZ­qÝYVî:•SÓ餽½ÊÊJºººhiiaÛ¶mÔ××ãr¹’ÄûýBŸ¦iß¶mÑ“Ï3‹Š¢ã+ P¸ó_¢sþ-ƒŒ)ãR’\DBº%œ`âå±—~Ý©9Æ®ç½|‡cSöâ£8îÁjÜPÏ®Iaa!@]×Ù²e {öìá‰'ž ³³sÁCîF˜«ãA]?wñë*~'Šâ¸Ïƒi!>Âùé?¼t˜úËÝø§§Q-kQ?IK$ ‘ 85Eåà æ¹3D޼AoÛVFyQ³%Ô°  Äblì4‘È(¶]F6§UÔ'{b>s–””ÐÑÑÁþýûÙ²e ;wî\ЗɯҼ‡x'2Q©jlÁzóÇ\ V²¹®E’ðVR¤OsáB û¾C ””S_íãò‘(ñìÆ#f8óñI|Íñ榛 ƒ/@I‘‹«'OQ_¤“ê;ÍÐð~^èXF—m&F‰LÔ}A_Y!TY;õŽ~‚cS9ÝŸœcx*Ií}.suu5?ü0]]]tuu±yófÀB•ÜÈØX¶Ÿ ËOøK$˜þ€é5š· át®ï䨙…3g¼ φ¬0ðȲ¨Ñ=I±®,2B0x9Êè•JÎ26Öe…}Õ «°Å¢^Ê )++ãþè…B´µµ‡ï¨È1–‘B•TÂJ%TSHMC%Ÿœ9ÃÕ¦Êü*WN}Ä´ZÁ®®šÛ×8=Éé·_'ám`óÖf¼¡"¼n¦eÝ7ðnlläå—_fÛ¶mìß¿ŸêêjÇ:´ÒÈ^Ø‚Þó½Œ÷ªøý›QU×}=GÀ¸þ J¦³'(œG]¤bËVT„$# dܸ^%!ÐM“Êá!Š£†{Îså¡§ìwp”íX2OÖ²,"‘ÓD"CXV  ¬žþ{Þ Þ~¾óïð•¯|…ÊÊJÜn÷+¾åÿÖÖSßVÉ”\OE؃h?Õ-uôÎDhh¬Î:’®0{žÿ:Gá—ÿýo±x‹kh­(C‘@v…(.v¢º‹ØñäS¼÷öÞþÛ> ËÊ ÖTK© A ¨ëfŸÝ¦HxôM›;9rêû}tV8). #ª'Lq‘çf®‘楤´ð×tòÈ£×9ùÉ{ ^ R\죬"°OöedçÎüà? ¡¡`0ˆ®ëw°³²$Á #½Á”¸Å4· áõfÖõ]5=­ÐÝí$•ꥸx‰íˆ¸0‘çÉJÚUÛ ^gôjšOý†aJÆÆú1Í0àXõït¯ên·›W^y完ʗÍå%¸ûåäò…ðzt$I!TF‘ PTMEè#>zë(_`ëçI¾õ Žþìï°m ÕégÓþ.\·/óM¤dÝGqY G>>ƵO"Ë2Î@-»ÙŒã>«6öîÝKgg'Á`ð®,¯l‹ÿl¢ëý:~ç}ƒ=zÎÿwÿˆ–îó¸nIüµ•©`ˆ‰âr&JÊIúBUG²L”tG|š’þ‚“1üÓÓH€nÔöõ˜úÎÇF:ð/PëŸåÁ˜Á,SrŠHdp”h«ª;ÙÈÁí̇$I”••QVVvßןâ«ãùßþC,IÇçũt]¿Mëã&ÿü'JkÚyú•ZR©  ÝåÆéÐAÃCÏR¼YÁçRÈøŠy虯STF•m>üáüE>´‚ré×PœÙü+ÕÍ–'^¢ÅÐÑd•mϽDÓÞ$²îÆí€p]¯ßKEøEêÓú<ûXÖ¶‡«,¼~"3G°¦“›'ès"¦{8OÀý¥ñ©ÏçcÏž=ŸcÃæêeT=‰ÓÇñð&--ë”ÌÌ(œ=ëfvvpø*£c›—qmA&‹?ÔE z˜¬cb™‚¡K±,()¨ß‰®ccŸ2660Ï”¬>(á 4[ P"É:ö¤6e߯]A~þLÅ‹Ç-óâo–â‡På_ûçÿ‚TFà †pj2O|ëW™›cZ6ºËKAI’xä…W°•‚l2ž¤Rÿð“”¶ï •6$g—K¿oªWQÂáðçx»òŠÓ°é»0ÌÔ ‹@ EqÞ×sä¹ 2—ÿ‘æwÿmÝÝ(ó,‰©¨Œ•Wq­£‹HË.Òå(¾„·d`HfyjþØUœý§(½p„Æî3„¦²Ù éiº>x ÇÜ$W^è ÏÜ—× `šæ|øfÛ.™gW]–º¯×¸)î÷NþE¶YsºÑœî;̽¬à×A˜I†/žà£OǨïlC‰ry¶<Ü€*ƒî |æy>4ç &ĉ/xóCý¬Ý’^|Ž[ÙMþùǤ“Óty›qÂ46–¹zË_C]UhYB-7ÏÔòèžD‚Ù‘Cxxƒææ¼Þ4ëYfg>ýÔÃÜ\”ÈòòÚ4ižY<”ÃÍÙ+J/Æ»–Äã©Áá(Z×s°lÛbtô‘È0¶]¼êžÞ­ÚJU‘èn/ºûîGÓí¿Ùا°ø&Hõ ¹µùµ¢»ðÞÉ xü…w+?ÈJ6Î^©äqÛôb¢_Âç«%;(òËw¨”4v÷Oi|ë/è8~áÒ@OK;g™DÇ3xjÑœ¾;øŠJÚŠÛ0+wqiÓÓD?ü+ZN¦öúuTÛÆ•N³ùä1lýé}ɇT¾íKßä–e‰\"šO×sG`Eæ‹=áW]´îÚ©ždx$‚-;yäk/±u[å=C¹÷+»Ÿ>À™ÓE7òÔTº—Uï–Có$'>ö>å "x<ƺ%ñ¸ÌéÓ>fg(.îA’V D&²°,Ë‹„rdY’°VÀ°F£\?U "Ë1’÷L~\û’HL‰Ä¢r©·® '‚­‘e9Û{™%•H1t­;SD2yî~oUü£çÙúÆŸÑqáÂmW«¸æ¦`f„™ÙS™/jŒUb›2YØHêðßÒzñ<ªe¡'Ž ~—žG~ËõåºKš¦ÂÔT/BÔä (YПå L–Aþ¶?ñÜ2e |ö¦‘ Öv²¯¶se€×2Mô–DœDä-üÚëÔ×Oàr­ïD×x\æ“OÂÌÌôSVviEõ €¼h°w¥¨èd|D PL*µþ/¬tzpå(™¿œäe,é܈ÀdE¨?ªR†ê~ô¾¡Ï â}‹ŠÁÁ…ðÍ­U 5ýýGÞæ”»…éößÀv¿Øì‚d`š£…ÍÄßú÷ç¤áÚUX'ß×ç$°+:©Âv¢á0EÑ(à™BIŰ,ãKUçäàªÞwŽIÈç#ät‘ÛÐ6¨@’‚ìÔ¨š†eäeí‰eš8ç¥zwÚ{Œ‰&ã#c¹j(”/Ô< ;ÍøðLïJ}Áûôþ¶•ÙyfCa':†ín¦,T¾,‰VZlÛBQ ˜(²ÌÖŽæææÖ¢””Ü_µ˜¦ª£tòtäCÎ^>N[ÓW¨OÍ¢Üâ€ôzK‰+!¼Ü,–$ QøWûd+ÃŽ#oâN$n'’Ìtû¯cÊ2£ÑQJË:P kâ0.º³ÆÇ²pÍŒ`ØÄ&²‘•ûk.,$üÈ#äeiDÓ5 ÓÌ/ÄtŽ…eã¸c¢¨ v" «ˆÓwùǤ‚OSè/_¨$Ö ×»¿Gªâw(ö=z_]B„•¤çìß 略¬qÑ !Ñë¯rüøÃÓüû¾†¾¦]Û–ª<ødcǃÇãÙðJ¦*JVÑ–,D¶´:4=Õ‹!I(Vf˜Ø’DR÷Ò\xoŠçD‚‡¹¸ÿ_!Y¶ô.îdrœˆwþ '$‰~_˜K×#‹[pIê¿]õ0YTqÓÎØ6ŽÄØkû"±,ë¾I^–VºN2LÖ0±m$)k;ï LTE]ã¡Ar®Ÿ¹Ä4BÒpTSàò,¼—š`61\žJ Ü^â1ù~ü2BnÏÏ´ç¸6ð)Š£dÝ”×JYÊMSóÚ±D’ ‹‰¬²åè…%>s~%!ÄÖj#}¯Ñ?|Š´a¡9Ë©¨y–ê’=t?ñ¯v†íŸ¡ ²¥ÄBúS"›v‹&ùô\9MÍ_%ü*Y³Hvê³§XÛ‰ö–e¢8\ùƒŸâv8‰MO­¢ç}ÝP’¤¶Ðºõú ¿Û‡Q>C}€eËó2 ÝÝ„æ(÷¬½¶"Ë ÑÛHSÕ5œÙl3tí'\ºþ!’âA&†å¡¾ã·i(«`¢ÿg|zå(²êD&:µßÂ1w‰Èä5¼j ±©6ü®V”[<>Ûœåâ¥7ÞM4ºdFÖZÕT¼,Ø“e$¤N€µ˜›¸Œ)+XŠ‚fšÙiÀf ÃH29øKÎt¿KiíÓÔ…Jˆö¿ÉÙO„kÏ¿$àðóó²"âÛc÷é ÜHˆíéáiÀÜò,‰Ò6.×gÎX‚©È*‡{oê”,‘qùA^۠ضlT5Ϙä0q:±&V‘11U~ò½:Ò[Gøƒoãš?Úé9÷çõ_ç¾:}_%¹vFæÝ(ÆÛ>ÅCm©;žéñó¯ñ³Ÿ†9ø¯»ùÝg¦Q×H±¥mÛ(’´ë¸80ÑT„«Œ‰EtðM>Ž_¸ÊiƧF T€¿JïÕ7q—¾ÂÖÖG‘Í(çOý }ýP]ò"ÓÑ0 ìÞñrŠèèql‡âàcø{ŽSÑð2u¥-·!LÆþ‰èœJG×KÌuw¯/m7)´¼,0™¯Ð°m›\\ÕôìUF{΄ÇšßwIªG.Ð×9BÿØq¤‚VP «ø‚‘Ãÿ…Øô>·IofkÅV4vŸ:Lhj hîéAâ N51¬€“Â…ÍLä—Df/°¾g €¡j$}¥ym÷1±,EÉ3޹ ]Ƕ¬U £Ú–Äb£²lûö×§Çub3*È6p†`ÁM2`fB'6­bKašÏ&zÝDZ”ÉUUiʽ7fÌi}?@Ú‘¦­&ÅZ£lËB‘e”{3&"g ‡3„?P¿€mkŽÙh©T”™´ 
©´‡æµ„p¨žáþ+¤,·¯Æ?åìÙ¿¦¨¨Pp~o)"më'ËÚgæÆÒ3ç¹>x‰âªoPäq_gl‰$‘ïc²ÔÀ$‡»';¼MÔný}ìá }ê8žDÙ¶©î¡O%‘˜Es¡)* ¡èatÙ"a$pø7ÑÜú×OñZ(D¦³ƒ}çÏS8™'M==Èâ?qB˜L·ý¸J0Rýô^9DhNà‰Çhì©`˜Œ»äµÍÖY–ªæI.ˆ¢©ÛÎ:9ìlþe1?{;HF€dK¨)^xeˆíõ}Ç‹øþ?Í¿'$”‚ Ï%}Ñϱ Ô)¯LQ¼#¾p*.“ÝÏŒâð þã©Ðš¼‡4E¹wމ®i+W‰Œ/¼“–ÖçÑæ)Ûˆ27ü)@Ø&BH(²:o%EòSX¶DIÃ+l+hb,rŽÑþ×¹~U¢¶ùש /~ˆ…g¸ÿ Ʀ'ñ]ãÊ•«ŒO1'JÿP5í mtעض=‘æ›C-Ù ŸÐËà^õ”’ 40ššB*ÆÆðŒ~Ц,#1¬$„Ä Ê*H2%5/®x’øl?×' ) »ÏœY'W{€ÿŸ½÷ ’+Kï;×ßô¶*+Ë( ®=Lws†cz†QCJ"G†¢(i¹±!÷°æAzÙØXmÄúXQ!­´Zy®Ä!9äÇÁ´o˜FÃWå½IonÞ{Ï>T5h Ñ0™•eî?@òÞ<ç|ßùþÿâCá’Ýÿ—©nSÍN0\X#¹¼|÷-fc­”‰`lóÞ7®íxÄd‹@× $±žßEõòè‡ þ­e¢m§ªrsAe`­úùÁµá?0Ïßü¥Uä¼÷/{øÁcŒô,qåÝ$k¾ÿàoÌáw5Þ{7‚# ž{)Cï÷£œúÊ<¯)Þ¦‘AKg»¼=½nÍÆÔ»dò¡f²ª( ¶ç¼Ma(…Â2®X/%.æÑÌvtÅ%ŸÆ=Âç~›žÿ[´ø\ff.`¹ ñŽ·¾PéìÊ¥Ò–U¢V+Rµ 8îöNÜs]w½ƒç1©/1Ù(­Ø ó]ýwݾ‰Õö]ü!QŸ‰»ÉÒêV5ÃÊüT¥8±@+¥ÌÇ\¿öGk¡è µÐ>~Ø:ÄÛG_g-½ûÙ}ccûÑï¹úošÝ<ßuŠý+óø6ú¦Í«{£Æû·}C>×qÐ5/Gk+À4 4$Ü&oØ5‰rI¡X\ÿS*ËÔìõ“¾º`p{EâðËÒI›TO‘ƒ%¦Ç‚dk‚Hk™Êb€üY’Ñy™Ã¯.óÒþ †o}Δ/à`î0\«V1Mó®ü™}L>8e›Yzp/m{™ÿ#ÆÜ%kš¹Õ%:{¾„O*s{ê»ÌUÒí‡QÝ5Š–C Ù‚ªø05ÁâÜi¢Á0©XÛú…­H÷~“tï'Op¸Sõeú÷¼Œ®loµºNLäÑe+MQ¶¨ÇDEÓüH€èd±ûùÐÛDòy$ wô2]û‰)Œ]ýçÜ‘„€îþ_¦=ž@”WqKWøèüEdIE_Ë—IPu޾õ]ŸÕ76ü.:E´Ò ]S?o??Óš$ÓqU nûý®s·ÿ‚‡æÂÐutM[ï"jšM{¡——ùí¿xOòkÞ <¡Ôj2¶+ðûÜuR. |>‡ª¥S±áÕ_™†x‚/†øýãHÁ*_ýæ4ÇÚvî¾Y• þxâ-õHŸX|ŽËÖÊé’‰§žÇúôð0I#ž~;FV"ôïÿkègY]¾€|tô}‹Þî—tíùöÄÛ,ͽ’F4ýº{_GסgïW˜\'“&M}æeíK%%¥ênéɲ…ªm®‚û$éÈëÃPÇ«_QÐg –ÜGZó û#Y"³çu&{¾ÇÈÇ!ÝS“=û'8¿ôßÑø¾j”¾‘¿}?¯‹½ƒ¿þŽâzh?ƒ#ûúùý‰ÎþÏ{ …¶½z“WE©­ÐÒ™ÜÔ5uœõ$1/ùµÎUÅÚ‚ÄD’ƒÄ}wÿn'qóù?GÏÄÂù<®\¦ùW\ýB'uœO×%*Z„H4ò Ém}•›¯ý}‚ç>øÉ]ÏI×ôô}¿7:ø·_úuÜà¾:’’â­Ð»¿Ý·yÄÄuÖû/x9Z[@€Bqë–(Ä»K¼¸¿ÆÚF›¶„X ðÓ IDATþ “N-“4~ðÝ.nU*¼þÆ*zY£\„#6šêdn\Š0¶èks[“¹u3@¹¤“s$ìÛA>þØ%Ñ^¢3á°•§p]$¦a~1‘eTeÝcâa3.²"¡X–#Iº{6÷ᮃ¦È(1©¯×DUׇ‰mq¸f’ìž/r{ïŸpøÒ‡h¶Íó?£dª\­Š;õØ-ãÔ)n½®`kGÞý3¢÷””Q&_ÆM¾J}­)Êñ6›Á£{H÷n®•,\EV¼­-„p ÈÌêjÓžß5P ý”Q" :òˆ°@Òküê·&ùÃ?Nòƒo·ƒê²ÿÔ,¿ôæŠ /ŸXaæ{ þ¿Û…¬Úöexók+DC¯œXãGÅx»¥B×W2wsM²‹~þìÛ¹9«Q‘]ò×büË›^úÊ,î3øëùª•®”¨ëzÙµê=9ŸML6\Ñ®ë òk<))Mæ<–¢c_Çæ[^®@W5Ïâ«71Ñ4œbe뿨$a·¼Èù¯üÿ˜C]BÂ…¯þìû˜Å SÏýyÖöþJpd呚Ë(® [*¦IÙ绘Ô4K‡2~èˆP/õ0ãdyD[•Áç{hëŽnúò9Žƒêy·–ÇÄçñíõü¹ÍÞÕæW~ó¨íQ1¿ö7o#Ôõö t¿ø×‹Ø ª >W–Xã·öfp\‰uo @QABðæ_àmÁ½Íº£í~ûïÝzÈ]î¢Õ‰”ÌÏïG ¾J 6T×%«YºªÞ—ÏúðY9²Œ®ªXžÇ¤Á¤$O¬5Ïà±6Ú÷´7…ÇAÕ„j(U‘]—¼ßÀ(,¢®]Å¿p‘äć ^ÿˆÔÂÏ'æV Žçòþ.¤¿’\ùÉ’LWz¾‹Tw¢)Kç:ë0½äñ-${º¾ž·à8›NL$ 4ã!I hÆý÷©¢Š‡çpJ jõ!£dE`(®a4æ¾æ†pÔ7iø2’Ö[_roÕ05íó=&’$¡«ª7 iHÉб4é=é¦Ç®¡™ÞœzÃ4 \{ûx…Áêù—Þ°p”ÁÐõ+h¶ê8´./‘üÙ÷Yýè]fºöP '©éå@W–é(®â/åi¹MËò¾R õžŠ¤l$ÎLJ_åú‰¿‰Ôò:¢-èe9O¢­ÄÐó¤º[š·pŽ‹®ªžÇd A×5TI±×.ÏFJ†°å¯“î{Í?XªYøýþûdè3‰‰¦kØå²·3 !%¢É<ƒM&%µZ Ý+u¬;|†±}<&ŸXÿ4ÅÁ¿Ì…@ŠBäÿfðêD×ÖY’««$7b÷ȇB¸²L(ŸGyHitMÕ™ëìåÚ¡SLý˸ɗA}ö¼Y©K<ÚFkWks·ã`¨šGL¶ ÝÀ¯ëÔªU Ÿé-ȳxJæ‡qÔ¯‘î{#8ØçTJeÉûåø3MM×qò9owê E)ŒÊá›tÓC(v­†iÞÆ4Àc‚p?³Cú„&Ô‰œ˜1ªû¾ÉÇñ=,îùº¯þ„½·®(Qï©2’ànÏ}Ê ¨i«É6F‡25ò&…®“ˆÈ{~ã¾®\#^aàh m½Í—×±Ñ Ÿ ÝR†ŸLÀ`µ\&øê1GJææFpµ¯’îÿ z`°AϸVè§J»ÕG)V·ÖX‹O’À²FqÜÕÝ°ÕØÎ4PžÁ£ýtìkoº2“% Éq1MϪ¨;UU$>q'7¨$•<®s¥þd'îgþÅ7™íïfôÖ;´OMÒ²0Gba@±ˆâ8wûøI¢ªëd£QVÒ,µµ3ÝÓO©ãrt¡<û; ©H d3p$JǾî-AjV =ä]~[ ±H„ù©É¦óÛ®p]˜™9Œd~™ŽÁ_Dõ 5ìYŽe¡ÊÒwÐgjLŸi6ÜêéD7 8ö.I²•:1ý½´tlE&²Ã‹Ã6„ô)²‚ëØÐ bâ ú|¡D£ä4¼Œì¼€[³˜˜™áΛ¸•*r6‹–É ™Z,Ž!‡Ãhƒƒø zt q7Áµ>ï'0PÔˆ$¶¨Yºá…B·¡b£2Gñšß=Á• 15}5ðeº†~Inèóªå AŸÿ=z„ÇÄ\§N „/`àÛë…šǶ‘eÉ‹7‚˜( šª4´û«$A¬uó?C==ðê«!Å"N©„$ÉÈÁÒ=s.v $ÀµíûCyØ0MU–±- Å÷lÉýSS]”Ë;¿@ Tò“ÉtMí¥{ÿë  7ü™V©DKà ˆ‰¦ª ŽãxŒs‡Â¶m¯ëkƒ È2†¦m‹&kONˆ$¤`9ÜÕ{,KÒzYµ—£µå`&ÃÀªT0ž’˜t­äsÂÒίP:ÃÄÒƒ îÛ”gÖ*­©ŒšÏ$&’,£lt¯ôˆÉ΄cÛ¨^©ccˆ‰¢`ªy«æ-Æ…ë¸È¬wùõ°õ ƒ` ÀråYšJ¡aÌPÿ®Y7!$6'´/®e ‡øÙgJ”¼añÙµºgìÌCh;˜šî ðkWÁ0MÖÊ¥æ’Ïš„ëJ»hÝ]ÔMJù¨Õ,Å#ö[Ñp„ÙɉgüÁ.Ê!ÚDUáÖjh²‚ÿ!-õQŒÓ§ÔjͳøòkEVf—pœÝauÖjÁ°ŸÎþîMº´j„ ÝëZÙ ¦Í4íù¥¬`úÖ嬵Kä§Š?¢Ów¤ Ýlü™®Yšæõ0Ùª‡BЬÖô>•R¿Ï÷нy¤Çħ딪ÍQj¹•“W¨Z0Í–¿IÅâ ‹‹ˆ§íM#&¶eaÆb^9]ƒà7}8M"öŬ`âÊ,•ŒÓìD’v¶bÎçgX\ÇXôìïBß„|T§fã3 ïÒÛ¢ð™&º¢b•+˜Cž!\‹šµ›F·tCéLʹmÁГI’0}&öÚÚæ+™Õ“WqJi"‘¾¿Õ… 
kkT*~`óÚ˜[Õ*†×äqÄÄïG4!ùµ”…‰+3T2ÂáAdygç@ärS¬®ÎS©D0ü›×­Úµ,LÝðˆý…aD²ù|Cˆ‰pJ¬NýɺɦÆ@š„rYfv.Ìð‘4ÁÔWž‘—ªÅ-{ö>T~ÔÏÛXÛÚ\I~µÂäÕìbŠpxw’ùùóärU$) LoÚ³mËÂïÍÉiL]Gb}˜Ø&åñ”s2w.ß¡’ ‰ìtR"Èå&™›»A±¨ D؈2gÇD¾«ë¢+2±èÃ'‚?Òé÷ùp7Ñ_­2yu‘Z¡•pxçgBçóËÌÍ­“!Ò›+´›ï÷yĤQeEQ°7A†„€rNeìÒ¥5HäÀŽ&%B2™;ÌÎ^§PÐ"ºéï`U+žÇq‹#‹!;ëy&õ;{.ó·ß"ª}‡ááOJ,ËåÂ…0™Ì ©¶Huð²9i¬öXÄ$àóƒ³>ïc3HÉÔÕj…¡Ð¾/0¹Ü2óóÉç«ѶéÏwj6Š$y¥à &&†¦o 1©äÆ.Ý¢´¦‹B–wî¾ !Èfo37w‹bÑBMyZ¥Šßïû­ Ó4ÑU•J©^Õq‚…±·ˆkßæàÁÙOJj5‡ "¬¬,ÐÖv½nVT1“!‹}¶î|Ôÿ×4 Y¢áŠ5¿RaâÊV!N(´oÇ'ê­“’KäóB¤hF|ÒªVÑuÝKÜk E!èóQ«VúœRVÚð”hÄãGv¼§äRR*™¬·Í—šñ"(HëÆ›‡¦Ãu]nݺÅû|@åžÞ%š¦…(f³uØr›ùÑ3DµÿÄ¡C“†³£×Ô¶m.\ˆ²°°J*uY®ƒÂq]ÜZd2ù™aPùó,>]ש5°2'·Rbâê,v1I(Ô· ª–YX¸D¡PEˆ–ÏÛ‚†¿éóˆI!IŸ«Ò8bRÌÂøÇ“T²~âñ£ÈòÎ{$„ —gnn”RÉhVÒa­Z]oNèy› Çqç?ø?øáYXX¸O§I’D"‘¤Z,>“ç_¸e–ïü˜¸ñ{9ŸG47’,©–ËÞbì²)†¢ ½ÅØd‹EÞÿà}Eah#ѵ§§‡¾¾Gw —$‰tK Ù¥åMéËåáAXÕ ²ëÒÚòù³ï>×c"IÁ@j¹‚îu9ܨ•+DB!˜l1BÁ s óž×q‡ Z© iª—øºÙU¹Ì¹s瘟_àµ×^£³³“?üH$‚ù9w“$Ittt0:3cÛ¨šæ-èfËM¡@,Â÷ÝÆ?×c"I±Hسøv „À®V ynèÍ!&’D0À±¬­•häáél©L0èyK6uÍ«UÎ;ÇÔô4Ç_}•¾¾>ü~?¯¼ò ###õ~ŸˆßO)Ÿ÷´ Z5_ %‘|,ã챊º#Á Ókk[Ââ«”jß½sÍ…K¥´€«·àÓԺ׼áP..òéæÃ¦¿ UÞ^V“»f =ë}“ ž§Õìýw+K|Ê=ûï:eJå,@ª!ç©å(Wïï³"iü†ÛÍeµ«±„w°7KçW*œ9s†Û·osòäIúûûïöÀPŸ°/I*Ù™iBÑ(’×\rÓ`•+È® ¼RÿÇ åøA\ÛF¸Iibÿ 7Ë­Ëÿ”jèe }mƒ¸ö*7.þ¯X-ßàhÿ+÷)ÜÇ¿°mVfÎ Eƹ÷#jå1.¼ûwÔìøº<ðWHG“ÛJ±J€kÙøý^EÁfÁïó¡+ ¶m£7¹Shv鮜eø…¿K[hcðpÈÌýˆó·.ðÜKÿ -~ý)δ Všbnižö®î#ìBT™¹ó{|tým dã³%]_cßëÊö¹ „ëâÖlB^(tSP.—9{ö,·nÝâä©S =S·ê¶TŠÑ©I,Ëò0n"JÙ,ñpø±Â8í1 øýHŽƒ+\dš«Dªå%*fõ¥X-ÍS±»wì Žc#$ E1QõþŸ¹6‚õŸ)Š‚]™åÎÍïàëòGî»@„“§X…=þ*íñ¶»W¼/ØvÖŽ‹©iù™ŠªšH¢ÆêüÛÜ'Ò2HØþùÅ-¬ê2fô%Žþ:êÆ?Ëz]Ù^V«pv­öØ ÖóyJÞ~ûmn޼ɉ'9pà™É`Àï'“Íd0ÚÚp>$.œã¦ ÙWå“ãíÔd.‹ãôdy¾·ÆÓ~ZYA2»2t/\[bb4Èè5Åfp$Çž´MÓú®K­X"õåÜOMLü>¦¦a[ֻΚÂÎÖÎscôÏÈó ©"ûúaSÇ*\åÚõï“)®u2CìëûEг?äÎìyÂ"€¦¸ìë:v×êsޤ uá÷G@’‘%e[ZLåRÃ0!¿ÂÁ±‹ÈÆ~‚d@’$IÞvÄ^rŠ/G«‘°,‹wÞ}—kׯóêñã:t¨nŸÝžjcáæ œdòéZÔ?ÒðSøÉvP=:Ë=U|ÄÄ®hüè÷»¨ýBc½Ù§:÷NUá;ÿºƒøKK¼ö\ù>r#\‰K?nç?~/J¤ÕÂ*hœy/Â_ûíIÛœ¦ÊY~-ƒ©ªÆylbë-«å2fÓ-mA9w‹ÙÙ÷î å¬Q´@Ô¸uí_“e˜Á‘_C«Mqóú¿çÆÍ Ç~‰™Ûßa©êcd䯣»‹LL~ÀZn…ŽÔ0Ñè^R_¦«ý¹{\Ñ×.ã¸eÆGÿ)w Io¡¥í8Ým}(Û,ǤZ*“ ‡=Í·É“H(ÌÄÊÖè… „ÅÒüP¢ÕÈ,Ýmw¸2õ]®Œ]dïà¯ÐO±8ñ®}ü Fþkââ:·FB¢ÿoÐÛš&³pŽÅµ[´Ä^' ^ôï}ƒØ½áàØe¬üG\ûh‚RÕ" gÏ >¶G®–+ºá ¿l Çáý÷ßçÊ•+¼òòË9|¸®Ÿß–J™š¢ÍI4'WH¸07`jAÅ¡£·H{ÒA–Ö– ~¦æu\Yê*Ò•²¿˜àƒ Zä©´ÅP‹sWvŠK>¾ÿã('çùÍ?¿‚“ññ³·C(Ž´ÞÇTjš²¡˜É0ÔÕýÈ>3OMLâ‘(«+hvÒ—ÃÚÒy®—Æîû·Üê$©¨&YXËÑ}ä+´%z€Ò+ïqmþ û‹H²†e•(×&9xè94°n#¦¿Ÿªjm5††¢(´´%¿ò]ú§8Òï°/½oûX}BàT«„SmžöÛdƒA„m?ý ±zZ£…q¦Æ¾Íü=¤Úµ×¨¸1Àbméjx˜ž®—ñk2¦úƧÿ72ùYâÀÎS-Îa9¤{…´¨ªJÉôƒä¶ÞWJ+„‹ª%Q4 %ÐOk`•ÙñÿÀjncGƒ€¦n›},‹±o0>üðC.]ºÄ‹/¼À‘#Gêþù²,Ó–Lrkn–p<Þæÿ°ïü4J0b#Ù ¹ªÍW}’/«òÑ÷:øßŽÙÈ®BÑvùâ×gÎú¹>éÃjõ3=«ÓŸ,ß é¬M™,:|q¤ÈA,Éåå_X$ØäÈq­REâ±z—<1‰„BÔæfq]·ÉÖ‚Jzï7yáÀ›÷xL–9ú¿¥ Øv [Șf`M Èc;·±\…ξ_¥dÿ!7ÿc7tÂñƒìÙ÷Ë$±Äq^ÇZñMѤGì=pþüy>üðCŽ9‘#GFÚÓiÆgg(‹øë^ú-qñÇmü£ñ»ù®#sí¶Á¡_€ò|Ÿü …¾/Nð[¿œA.™ü?ÿç^~v.ÆÉC‹L]‹!¥3üý¿;CH–¹|>‰ýûVèþ^„7~i–_x®|7¹E|ÆÇŸ|§ )cR,)„ºs|ë[3ìMºMÛϵÅEÒñ8¾'L4~¢PŽä8¸ŽÓt7¦$©(Šv7Œ"¹?/VEXVyÃ5íP³²ÈrE–"ÆÀÈßb_-Gvõ27o|‡[·£„‡†>Ùã™c(U DL|ÅDU „kÓ\?ÙÂuQ‘(A”‹óHþ I$4=ˆ„»z»8Žƒp‚AO~ .ðî»ï222±cÇšÏè3MÚIfV×@L-mž;’C¿'Çdu|ýܬ­èÌW_8Tħj•Á¾? 
¯¹¤÷eqþ4Ê?ù]…‘y ìë±°–×=ù²Â ­¶-a•àÀá¾öj‘â\˜ÿåêæÇïäè|3{÷=6õžÝuéîìzb‚ùØ;ðû1»VÛÒ]óÌ@;‰P€¥é·h ¼dM3¿|‡DË—ðIy¦GŸ¢ÒKWÇp73@Þ©"Q¨Q,LS²ºñæ:Ý.…å÷¸tåtô‹öd'ù•÷XÌ—iïè»ëµÙ¨Uª˜†¦ªžlb‘Ë¥ÈHÑ–nߺÆüâ R‘KÓç¨*ib¡4…ÕËÜ¿L{ïDq"áîາlâ¸³ä ‹è¡Ô]ÃÁ±V˜ý÷,‹}è“SçÐB„ÍíӨ̵mÖ›z¨§½ärùòeÞ~ûm†÷óâ‹/¢7x®”$I´·µ1³¼DͲê>Ǫc†¯¾¹ŒoCÕVó·N·RWBªºÑ×HUX¶ŒåÂѯOÚ“çÂGA®¼ÓÂ~ÐÊ—¿9Åkýpüñ ƒC%ü>OžžV‡ÙEËi1É,,bªêS%Š?ö ¥( >Ólz¬nÆqUý›L÷%Š‚lt²oð×½ý3._¸†@B¼@ß(ŠŸ@ •ÙÉŸ²4ûc$IBÑ[ÙÛû2¦¡-5ÈÉ?FS †ö¾²^Ê(É„Z^ »}ŠÉÛ¿ÇÄ- IÖié~“½ûëÜ–º±Öc©PðâãÍ$&á03ãÍmT(øüÉ,.iãß‘d’_¦¯T`zô?2…Œ¤˜ô “t$‚¨¶aJ?åæÇÿ $IRiéù-±vjb¿xŸ+W¾Íþ¿Nk$Š(z ]=oP¼ù§\úð<®Ð| ý áºö’p$¥aÌZ¥Šß4~iî&8ŽÃ•«W9÷Ö[  ñÊ˯`l’G1‰…Y[\¤µ³sÓ¾s8T#¤HÌL鸽6X 3sáD™*qûfgŽ_ÉP̘ü»ßÝÃû„9¶'Ôœ®?¢]%’zœñ “ƒEœ’F¶ òÛ¨Mò–Ø¥C}ý(OÑ»é‰LçD´¹ °’¡äo#´è}ž Y3xèïãmÈ’L,u‚ÃѪV !if}ÃKÐÒõ&á–±jUd4=Šaø‘ þ¯è(PŒÚ=ý-ÁÞáß$Ý»‚íØÈЉÏG©kHK ¨+# jú#µr™H[ÚÓ†MB(B8vSguDZŽóâ ûñ#÷–B´ý ¼9JЧ!K­ôíÿ-:ÊkØ®ƒª†ð™áõœ-³“áC¿C¹’Åq]dŇ鋡Ê2FË)^>Ù#dÁÈÏù$n9ÉÑèA*Õâ:11â˜z=+r,4}‘޾8¾`c˜I¹P Ý#öu%%ׯ_ç·ßfhpãÇo)õ$Ø=]]¬]¿¶©‘€pg‘ãÏ9ÿ£4f1 “¦T^ýj†°¬pæíV.-»=’G«êÌ =TÁo:ÄðÁ™$éÐ"Gú«wIG¼£À+ËüôûiòóyJÓAÆÝ*¿q8Ñ„žŽ¹¥eüªFò)¹Â“h$ÂèÜìzžI“:Xú‚é‡1|¡Îû(¤fÄÐŒØÃ|x¾Œú#IÈZ”°ý R¤á4*éM ËtîƒîÁÞ†<Á¶m$×!äÍøhº¤45*É>BᛃɊŸpØÏïŸyÞe5@à!y’¤}†å)I¨zŒ kÀ·ª¡jóôŽÓ÷\?HBP«Tˆn¢e½“áº.·nÝâwÞa__'NœhŠ'*‹õùYž™¥­·§®¾²B-]½Ï+)+._]Æí´‘T‡oüú4ñ³1F'HªÍ¿9ʼnó(|á«‹Èg£Ü¹BR\ž;µÀñSk„L‰_|s·®ø˜_Ðqû~ÞhTñÙ|í7¦þ$µ´ù IDATɵ;~´P™oýæ/Zus º®LÍ "äG÷#¶C)“åðððSyKž˜˜ød!¶Dkí¦/ÒÙ'ÓháD¨1ÄIJÐÕëøÚDèšFÀç£Z®l~@;6Š:ÇÀáGúÑt·9µ²±¯É»5:ʹsçèîîæÄñãM ɲLÿ¾½|på Žm?sÅœ¤:|éWgøwÍ_ã+iêçº Zá _Ÿã 3`Ú |ý/zW¼üÕy^þêßm†-¾üY¾Ü=ƒ¥¥Aôàþhÿ#·°ºJ"¤5ùôÕvOd[ü~‚†U©xÒU'¨ê]ý0p¤q¤ V©ôù·EçÞŒx4JµXô¢>tYž£ÿP„£#%Ÿ{SÕ¼Vôõ %·nqúôiÚÛÛ9qâĦ†o†h8BXÓ™»³žÿåáAR²¸8ˆ-¿Ajïבô$ðŵ ===Ï”G÷DÄD–$‚ ÕRÙÛ­:@Qçè€#}„b¡†>«Z,F½Eo2b‘v¥‚ëºÞb<3)™§ÿPˆÁ£hzc“‰kå á@À#öψÑÑQNŸ>M[*ʼn'¶Ñ“$‰ýÃÃ˜ŠŠcÛÞ&} ËËýTÅWiÙó ´à±G{KVVi‰DH>Aûùg&&ŸX|µRÉc–ÏJJ”9º$õŠ5Ö=,\Q³‰z‰{MG0@Ù°À=<)é;dèùt³±eÒ±yÄþ™066Æé3gH&“œ>Ι³g‰Åbœ:ujËyo%Ibdÿ~‚ºNÙ µ°¶ÖCÑú%Zz¿A06ø¹¿Ÿ[\"=Ѱ¾ÏÂû%>>]£Z© êu¨,iç3Ô»t´yºú í#ÝœD:»\!ä÷cnŽ£»², G˜-ä Åž±BEaã8Ù]±v®›GQçé{®›ÁcýèÆæ4h¨UªèŠŠÏk¬öT¤dfv–³gÏ 9uò$ñ:\\1LÒñ£Ó³}{QvqÔ.Ÿo'_ý:‰î_!”èûÜß/e2h¶ÃÞ={êң鉗^Ó4"Á k¥"ð³¹âd„t‡é¿ÓHÊ"]ýQöŒnž÷¢R,Œ%š×ÔËÃ}ˆÇbL.-"\éj[ ŸBK—‹U¹´+ÖÍ/lz¢ýô õ4<§ä>bR.зpÇë­JJ8{æ º®sòäI’É­;I’$úúÈòd––I¤“»nÏ$I¢\IQÏnÿUÂ-ûÇb œÉ²·=]·vOÅ ±8óuè`Ù¾·‹xj÷¸Í=˜¾º¹y N„Uóâã[¡`¨Y5tóé½X¦_càè BH»F‚d6“_’_Ò‘n÷ˆýbiy™³gÏâº.¯½ö­­­[þ×›®usáÚU*?BÈ”Ê-K;¿=†ã ªVˆåµ“¤û¿J<ÝÿXg~unްnÐÕÙU·wy*b ‡Áqp癲Ô5]¯ûŒŸ²öªUdðú—l!øL“ iR)Ÿ‰˜Àº×±Ñ£ v3$!‡X$â-Æ`ee…3§OS*—yýµ×H§·OÇéÖ–:—[™š_A(=\¼þ;ì†|XI¸®KûÀAZ»‡ï?Yªí°o µŽ½ÍžŠUø|>šNµTBõ*=¶4ª…"AŸÓ4½ÅØ"P…x$ÊTfð·{ØŸRU–½þ%O€µµ5NŸ>M.Ÿçõ×^£»»{Û}‡þ¾>VΟÇñHïýUoSFd€ÅéºZZ‰ÇêÛÑù©ˆ‰¦ªDÃaò1ÙÒ( ´¥Ú<7ôC2gba¾áý\\[Þ5ë*)E­Ÿy[)ˆ†Âh^~Éc!“ÉpæÌ2™ 'O¢··w[~]ÓØÓÙɵ;·©†Â>ϰû4Æ'0ìmÀ?1‘$‰–D‚é[7{È—×XœXÂ*×vÅF[V…H‹Ÿ}‡ë£¤…@²º³YÏŽ`0ˆ*I Ö]ÌÔ˜º1I)g³Â=–UÄ0plÓÿìdLb=q<±·Ï#ö|>Ï[o½Åêê*'NždßÞ½ÛzÝ:ÚÓäòy¦æçIõöxgà^Y+)*ö6$)ü©D"á0ŠÔªU´”¡f–V™¹žÁ­ìÁ4S»@¨çYX8Oµ2W7bR-•пç†Þr0 ƒH H>—o1)djL^™§’M ö"I;Ûk’ÍN°< #»Ä¾çëò™®í ¸õú—|> .9wî ‹‹õÕmOJdIf`ß>ò— ̓޳ÇÛhÀ®ÕÈ/-Ó×ÙÕ°&yOML à ìóSÌç‰Ö™˜d–V™¾žATzºvüFçós,.^§TÒ‰R?a®ä DC!Ï ½%•žD2‘`yj²!ž’É+ TsÂá~dygWär,-MP.0üõó •óyü†®{ý¹Nå2çÎcff†ã'NÐ×ׇ,ï "¬i#ÃÃ|pé"+óóÄS©]í9q‡…;tÆbtwv6l-žúô(ŠB2§œ/Ô—”,¯m’î]CJæç/Q(¸Q?ÏкºHK"é¹ ·(±’ãÔu>G1k3qeŽj.D(´;HÉÜÜ  ¨¯õVÎåˆÇbu­6؉¤äôéÓŒñòË/3Ðß¿cHÉ' õõ#Jeª¥ÒîÝlÙùyR¡#4ô^y¦ÔšL‚e!ê0LAn%»¾)wtïø}Îåæ˜›»D>ï"D}küZ ¼þ%[>Ÿ€aPÊçë ?PÊ9ŒÝðc¨*¥gœ- „ ˜-0ss §ÔI0¸HÉ<óó7<%­[_?2Y¦Ïë³…¡©*ÉXœB&ûŒòÕ¢ÃËc”ׂD"C;š”!Èf'˜›»E¡ ×Ÿ”•b CÕú½ùRCµZåìÙ³\»~“'OòÜÁƒ;–”ÀzÁÇÐà 푳££»n q%—£–ÍÑÓÑAx†/>ÓIR•d$Jnyå©'2 !(åŠL]_¡–O öìè BË-0?¡a¤!(çò¤Z[Îl=<ÚZ[q«ÕgŸuR2zé:Å•±ØA$ig“’LfœÙÙ1ŠEƒz‡o>YÔìÒ2±pÅ ã<€Z­Æ[o½Åµk×8yâGŽÙábY’8vô(Ñ8s·ïàÖ!R°à–+TW×èio§£½}sÖúYYd:݆U~ú¸[1[dêÚ2V6E лã79Ÿ_daá<…‚½‘SRB —–¤ç†ÞêŠLå)cו‚ÃØG£”V¢$Ïï‚ê›qææîP*é@cʬ…ÔÊ%Òé´—ŸõéKÊuyï½÷¸rå ¯¾ò GŽÙukpôðaR¡ówÆŸÚ 
Ø.ªååÚ[Sôlb£¼gžŸ‹Dñ)*ÕrÓÿde©Ål‘Û—Æ(gƒ>*•¹½É•J‘¥¥QŠE!Ò !%°ž´ç×t /Œ³å¡ª*ÉHŒ¹¥%L¿ÿ‰.B«â2zñksÑè¥ÒΖŸj5ÃÂÂ,å² ˜ “Dzðk:q/?ëRòþûïóÑå˼øÒK»ÆSò0ƒüÐÎt‰ù;ã´íéÝ‘ë *UJ+«¤qöönîw|fb¢( ‰H„¥¥ERÝÝOôò«ó«¬ÎÑõ(ÙìÚŽ>ÌB@¹qXÛª’Yª"D™LfǯUµ:M¹,€ÆöåÉ-¯D<ùù”^9þ</^äÈáÃ9|xW‡¹t]çðÈA.|ü1ó㤺»wÐzTóìl–¶D’===›. ÏLL$I¢«£“Ù/¯q‚ͱm ]@US»A´±,Xh)Ù0kP]èh÷¦¡nÄ¢ÑÙSe|Á'K¶T”†:²+ÖÉq\`±±RêºXÅ"½{<ù¹.^äƒ?äàÈGõz#¦irdd„Ë×®±49E¢£ux©sË˸ù"½íítwu5… ×å‰ÉD‚ n`U*OåIØÇæ1Ý Ïôº½n#ȲLkÛu]nÞ¼É;ï¼ÃÀÀ'NœðÂ7)Ãíí9HÒô1?v›b6·eåY‚å©)Ê++t¦R/$üÌòlš&‡$›Ëqst”ÌÄ5Y"‘nCÙ$]­Jå\ŽâêŠ+Ø—n§·»]Ó¶l.¢\ïhK&)e³ b†Ësï°˜™ã>[KX,ÏüŒùÜOkƒ¹Në—þ-sc”ö ·ÄäÍõñó¨þN )Çí[ÀBf™­`ó ÇÁ*ikmõ´Á6G[*…!I ¯î`Û•‡œ[Ç®Ü#eŠ…r¹) ¥û½­P,Ì®ÿ¬¸Œã \§ÄÜÌܾý.™Ü4νB$Vç~ÌäÜ8ûüm^{í2Ô»§ºJm $ËK¬)kI$wmÒøíÛ·9sú4‰D‚“'O‡=a¬×%+ËÄ¢Qž?z”CCC$M™©i–'&)¬­5$IVܪEqq‰•ñ ª+«tµ¦xþðaûú0t}KŸõºS­­ŒMOQ.ño‡ª^k‰ù¹Éä—A2Åö“nÛ&K¸öó³ï³–_B Œ ’j 7û§LÌ'("˜ºL{ËÞ»–\­4ÎäìU:÷þû÷A89–—oá3ý hh¿´ÇA9_ `˜ÄvyÒÞN€iš´Äâ,d²øšd½»Ö,£×~¹µ¥uÛERH´‘á¾ã(n±kÿ/³+  I½Ao2ÈüÂY–V+Ü™x Ýü± ‹[¸E–—.£Å^ âØ©®7iWhJóóª…Š€Ö–Ý7[JÁää$§Ïœ!‹qêÔ)O4Š,ňE£ŠE—–˜_ZbyiI×0CA ßúDxUUy\Ó^–$„ëR«Õ°«•b»TF‚X8DªwñXl[(©»Vðù|´ÆÌe2 #&Ž] RÉàlø{\;‹ó‰æ–¸}ýß0±¼J²e?Jm‘[¿M®ü×8°÷ ócÿ™+3S´¶îGq³LŽýUçÏ“U ¡i{¸† ’¿MÁ1hWlîŒý 5¡O>GÀôÓlÒ)ål–ÞTª7 uG J1{õ 5ËBk€2qj&Fÿ¬Ïÿ‰ „ÂÚ%l±~Þ—¦~ÈØü"Ãÿ ©hœ•é?æÒÍ? –&)Ýabòmþ{ZÛ(®^d>[Bö¢µe˜UËehà+„MóçDÇ.SÊÏaI~®]¾C¾° jœž_¥»µ©É̾˜ÉЖˆcÞóλ…”ÌÍÍqöìYBÁ §NòËm†Î–$BaÞ®. …+kk¬¬­QÌäp„@V$MEÕuT]C’%$IB„À±ml«†kÕpí®ã «±`doŠh4ŠßçÛ–^À†˜+í©3ËK R¬6 Â[ËçîUµä–ÇIu‚Uezú2ÉÁ¿Ãs{#¹Y®_øŸ™œ}¾ÞA*…;8j;Ý{¾LÈ0)åÇqÔ4AÕÄg|—–ô«¤¢m?'ÂŪ䰊·™ÿ€®P-M19ýûŸû[´ÇSM%'ÕR ÉvHyaœƒDæKkØ®‹®È¸µ –+ãÓL„SD däð+ص5¦¿ÏÇ×¾ÏbÛõ.’I«•U“@¤µê ®È8Ž…@mnEŽ8å2}ý»¦w‰‚••Μ>MµZå7Þð:ÝzØzÆY£>8™LbJùµÍÇn1üªÃÚÊ8®açX]›ÀÀT-æ¦Î°Z’IµŸäÀ¡ß¦§¥•Õ•q,W !¨¹V á>"F‰Åùó!°Ë·É•VñùZï–,7k ‹ s׳ëC×élk#¿²ºÉOÖ‰µÃÍß`bê²™›ŒýˆªÑO<ÚEvù?ügŒM_¢TÎãº.²b"I*²ÀvVY^ÃvîIÝ“C¤ÚŽ"Ö~Âõÿ?{ïõ×yæk>{ïÚ•s@!rI00(Û•l·Ý¶»×ééî3gÎŹ˜µfÍuÿ 37³f­9gæ´ÝÇmÙ–%YJ¤ÄRb H9€*ç°÷ž @)R%Q îçŠDUíðÅß÷~ï÷¾ï]:Éô̇X<=8-?œ¥"c%|_E,ãØ±cd³YöîÝKmm­ÞÙt ‹ ¬&6j¬«ãúÜNï÷çåm°wjdbþ5ΤN#(Ò9™P×>Œ‚D6q•é±÷˜¶W!Q".hèÄltãrÚ˜û=Fñg4×õ#‹" `´¶ÑR?ÀÈä¿“Zþ”es;mu°ŸP,jmÕŽ­CA ¶¦†™… ¹æûµU'Øñx[>×lÉŽÇÓŠ x~BYaná0+s*’)Hÿ†Ÿ´YÐ̨ö]gaòOÌ© ÉN;þ†jo-eÃ&ªæ¯3>þ>²)@ÐåFAÄUý=&ç³›³—¾ž—°ï礀pï×Ë'’t64>2‘Mc±Ç'•N³oï^õަó`ŽÚw?>_(0tæ Öꪻ}œ'|­Y¾7S¢¦•Yž?IÅÒDµ·á¦ƒ¦Xž;FÙ±‰Z·MÍ“ˆL¯€hÁééÂã¬BDC©¤ˆ­ “Ê%«³¿·ƒÙØEca,ž ÔøêosàS•<‰èebÉe$£¯¿§ÕyÏÛTš¦‘Ë]!]BÓ°~¥¤¾ :šqz¿: ݧ«½` JÏúù(tPQ¤£­ IQP*½@nWMšzœ´mhÁlûêþ h°43ƒÛáX÷y¥òù<ÇŽcl|œ;wÒÙÙ©‹]˜¬N¾]Ød™R¡ —øMV}JÚ:7·Ü“(P²9\+Í¡>È<"8ìv­Š¢pæÌ®\fû¶í èÛ7:º0¹ŒF#u£ssXvjZBx«Ë =ª^À`1š¾Þ ª–ÊäIZÚÛõÁæ§½µØ…ód’I=;{GgkG”dä¯-VSrñ8MMëÒ·DUUΞ=Ë¥K—Ø´‰M›6é'öttaòu5†XŠÆHF£xƒA N݉ó«H­¬p¹êqKyL&õUA®ÏÍbu80YÌz¡|ÉÈ2›Úšõ—FÓ4Î;ǹóçØÐ¿dYº¨ó/@¾w%$I4ÕÕQN¥)éA×¾’B&ƒš/ÐØÐ ïëÐØØˆÛj%YÖ ã+(åó¨¹<¡††u—SJUUÎ;Ç'Ÿ|Boo/[¶lY÷±YttaòQS]MÐã!^ÐùrˆôJ”ú`Û­—‡ÎMqßÒØˆšËQÌçõù’þ“\\¢&Àïó­;QrñâEN:E__;¶ïÀd2éu®£ “oЇéÜ& IDAT ´65cRU2‰¤^ _@bq « Ò¤gÕùÁ@u~?ÑpX÷_@&Å"ë®ÿ¨ªÊðð0§N¢¿¿ŸÝ»wë–]˜Üv;- dã1}`½ &AÀP©ÐÞÒ¢::w¥¥© " }KçÌ’„P,ÑÖ܂ż~üp4Mãúõ뜯çѹ˜¾v£ P¬«w»qãCCC477³k×.ÝïLG&÷I’ØÐÛ‹]4K¥ôÚX#±°ˆ èZçù@VU å ]˜ug6¯êÀ¢HOWNƒ¬;“¯õ-Ÿ§­­ “qýôŸññqŽ;†?`Ïž=ØÖ"ÿêè¬G~ðÍIAh¬o`yy…Å©ijZšÙ b°89M•ÛÏëÕ[§Î=a2ééìäâèuйæGtÒ5Å©iNU~ÿº±&LNNrìØ1<{q9_ë÷±xœ±±1=¤ÎƒÕ_%‰–æfªªªîè«‚ö€,±Š¥g/]¢(K¸ïò ëḢ±K6lÐ#¼ê|-4MãÚè(ã‹ ››1‡iH…°I6õ÷¯‹þ£isss9r‡ÃÁ¾ýûðy¿þ±çCC <‰ÝíÖ·~tŒ¶ ¤c16ôõñä“OÞáÀýÀ¸s›ŒFZC!.^'gIcû𫂇|ë?Ýë¢ÿ|º}SÈçÙ»w/Á`Pttaò@ <&zz°!°8½¾Ä‰I2™žÆ.èïîÆ IzKÔ¹¯‚@CC=Í5µb1 ™Ìºy7‹Á@dr›d ¯«iôŸh4Êñ'Èe³ R·¶¥ttÖ0°ÛllìíŦÁÒô̺Ø/7K"SÓX‘ }}zFPï®s "Í¡UA²Ë+³Ù‡¿ÿˆ ãX$‰Mýýë"T"‘`hhˆt*Åàà ¡PHo¼:º0yq:lìíÃ!ŠDçæÑTõ¡-l£ ²89…EÙÔß¿®’‹é< \iij¢!PEjaq5õÃC*ðe ''qM lܸ.DI:fhhˆh4ÊîÝ»ijjÒ­Ž>n= ét8ØÐÓ‹K–Yž™CyOëHªÊâÄ.óê ª[Jt¾OqÒÞÚJ¨º†Ld™T,þÐY…r™¥ÉI—‡çQ5|6_ÖÅäÈ©ãg¹qᗆèšFrö§ORª¨¨å"…bùk_[-g¹òñ fRßûZæ¡éÝv›=½\¹6ÂÂÄÁ¦¦‡'XP©ÌÒÌ —› }}ºC›Î÷?± í­­˜Íf&æçˆ—J¸ƒUE02%—'¹°H•ÇM_OﺨUU9uêçΟ£¹©™B¡ÀåË—?_iw©ÇÛ>¿åÿ` ÔÔÔ~½x.jžK‡‘m=@s¨–Oãò©å,çß}ìV;] 
^øÖZÎ3ôÊ_±öîaó†FÄo9ôE'Îrø½,]mXŒ‡oQ%eèÕ?q5ëaï“Mdç¯rêøu6ýä'œ÷h5×4´Ï׿Vaìäa}x•®­{¨XòT4jX¸6Äá3N:z›ˆœùˆ…²‹íÀ(Þëí4´b†Øâ"ª£…ºj'ßç¬õP-;ŒF#ý/_fnb_}=f«õè5M£˜J“[‰ôxéï]ƒªÎÃ+Nëë±Ù¬ŒNLŸ ¦¹i5ÐÑÚ’‘b¡H}0HÇ:9üé»ÉF#¡Æªª2<<Œ¢(·Y²TUE½eëZƒÛ¶²5M»ís“ÙÌàààwhNS+D秉,'ÑD#¾ºFªüDA@Sâái–" TÁ€§¦ž`•‹å‘O¸>|»bÇWí¡Þ©žš!-![Ô67a·ÈwLzš¦ŠÌž‚ÑJmK+N‹áfIä®OE©`¤®½—͈€F1evjŽBYÃâöSªÃdÑ4J.ÎÌäùb“ÃK]S=£4•b:ÊÜt˜BIÃUÓ@mµQP‰L]£h¬Ã\ްÍ`´ûµ5 KâíϬi(¥<ó¤rE fuÍMØL"sÃç˜'t÷â¶[8ê—/LcªkÀ´m3“@.a~n‰R¼uMTW94•¹± Žjò+sˆÞVBu޵ûjdf¯suxƒ3@us3MÞ2)­æ¶mløÏ] ªúñÖéî!ª ˳“¬ÄÒFuÍM8¬F”B‚ѱ%êœLO,ÑÜÓM]k®7Jf‰k3iBõnÂÓs”T‰ªæ6üN3OD˜™šG•mÔ†jYŸ¤±§‹,ñM¦gÃÃ8¸nìïÇ|c”ùÈ2Y&Pÿày°‹‚@xr ›(ÑÚÐ@cCƒ>3ê<ø<^¶ntpáÒ%"SÓ¸«ƒX°ˆ©A`zô.“‰®¶öu—ÔR’$öÞ&2¾R˜|Nˆ|ú›Ï ûwR—>x“'/bv8Ë%Ò9Û^ø%»j™<ö6o~x«Óލ•Èaàñƒ¸af‹=sDæÇ;tœÑp›ÅH)ŸÁPÓËû<^³xÛ½&OáÈû'¨ˆ&4MEð·ðì/†—opâ@! [ÁÖµ›Ÿÿêy,±‹¼ñû×X*Z±e²É45ƒ<ûܤì‘âÉÿü¿³±ÁNzaœ±p ÎxŒ˜µ‡¿ÿÏc‘Tοõoœ8q‚¾¶gÔ2W=þ-UV8ýïÿ'®Mr`?v³t‹5IÁ qpßOi®¶‘œ8ËË¿;Ä\â16 îcøô0}Ïp`[+3§£ÜX³ïÇOã7dxçè1ÊÁ-üí/ŸÄ$”úÓãÜñãll>€¨0ºxúFÀf¼¥”Dj7l§ïâED¥ŽÇ~4ȹW.±¬Þ>»úim=M¾…½ƒ[)ΜåìéËô¿ô¿ðضʱ1þôÿüžKFlÐJú7óã§¶Bnþ¦UHE4¥Bco?vtP\¸Äÿýþ;331\K˜Í;yñÿ_äÌ«¿çãh”o3åæŽì÷ùرe £ãcÄæÂT º:´@¬e\œ™Aª(Ôùý4‡BÈzÒ,uÕ.Š´67SðsõÚuâ3³l6^Ú÷,¤ ‚å2ñ…E¨Thmn!XU…¤'´üÞF0O H[Fiµî•RšLx5†T&'VéëjÁ$@òÐØ\ÏÕÓSdË|¡„#—yí·yšÛ;hhi¦©!€šL‚ b÷ùih¬bøÝWHM´ÓÜÞFsO/vóícd>b%š£õñLFxBüäo…h41ÿñVg¦P ²AÂá÷" ÊÅá3¸šžÂï¶"ªšÚ±j'‰¤r´6ÔÑTkcèåÿN¸½¦¶:z;0”–˜Ÿ&í03ôÖ«ïº#NÏD#ê|öÕ­"w•ÒT–ŠªŸ ÉäÀí2ríØÛ WÔrŽh"I6[†*ñæä.Šk~A‚€(гIæ§Âäküó @#M‘œM’Ì—A2¬ áµ™ø|WauòÄ/rÞÄµï ˆ¢F4¼ÈR2‹â,‡/€V"™ÊP ÏQjñ`0yhhkÅ`¨|nƒM¶úih¬E’$Lv²¬RÊ¥‰Ì.a© Qå·c0 tlìÂ;4þíÆƒ‡½;™Ífúº{ˆÅbܘ˜ :=…ÑáÀîñÀ÷0¨ k…˜ÆHg³Øe#­¸NÝJ¢óP|§Ãɶ͛YX\drf†h:ÙåÂêvÁw܆%A@-IEc(…"~¯—¦P³É¤÷ŸïY˜øZ{Ø~àfÃj¹WòQæO}DPUU>õ­$I¢¢”©(кÿ ?­kãÆÈ f.Ÿäü±£l~æ%¶u|6y‹–ûþk¯^erbŠOý…“ÎV^ø»ŸRëþÌ*¦i*ªª!‰kοÈFãMç[A6¯m£¬N¸ÂšÅ§RQeÃÍ&+H ¥ŠŠÑÓÄõ÷Œ _efjšã¯~Œ­q+O<Ñ¢Xì6,k T-Í}h3ãs˜™Dƒé6Ëšz»eQÓˆãý¿¾Kpàý½!´Ä ckGó¿¸kªŠª‰XìÖÏîß¾™` —EAÂ`4ߟn¸¶õ'ÊF¬v³Xéß÷ÎúVd1Óªð¼[ ‘d ŸzF¯*"44* É Ý쯣Œhøvs¯a=t)Qñûý¸= aæÂ ¬$S˜\N,NçwvzG+—)¦3R)ì&3­!‚UUë"íºÎ#f=‘$êjk „˜[\`9Çèt`u8‘M÷7fˆ¦(”r9Š©4Kø<ê[Zq¹\ze<€Xmì²ÂòBµÉå<+K¬žj¬²ÊÂä ûžï£˜Špä÷¿åÚÅkô¶ö" QQ5ʹK‘­[÷ѵm7‘ñ³¼ü_ß$ú§MC©T(åódã (•ð8œ4„šVÍÎzhy‡Üzb2i…¨­©aqiiµE§Ìfœn7F«Ñ`@D´{ìI¢ ¬wT”R‰T"I!“Áa2ôx¨Vãr:õ x…I°‘Þ MŒœ|ã…Ä죳iú÷>‰]*qõÂq®G ­§¹’b9£âï`6ZpÙ`ôÌ LÉ ã®`µQ_í&µ8†äôcw:n»—Ñ kC;ÇÏ~È;•eLù®\Yboh æ/š¤#mÛ¸ô—“9ôu>Ó—ÏáhÛ@¨ÞGbê ‡ßø˜Ú¾~‚n3‘É9¬þ._5Ý›7qèÈ'úk™j—ÄôÈ0bõF?Ðs¯Kc\>/V%Ź#2o)[šÇ kÌ_&ìºí۲ɅšpöÄ'lÛ¾ž^Žž9Á{b¿Ucbx[ÛnÛÕxVíìV #W9w¶¾¶.zZÏrñÝW(„;QaƦâl|ú9Z,ßdÐ0PÓÑáòQŽ¿ýu^™ùñI Ò·[œKÿò/ÿò/ëqõçv¹¨­®ÆërA¹D&ž RÊfT Y‘ÄÕcdš¦Ý SÓ4 ƒ `Ð@+•(¥Ó¤#2Ñ(rY¡Êí¦£µ¦ÆFœ‡n%ÑYWnéCYÓÈ'’¤V¢Ó”Bµ\FDXÝŽQUEAUUDm5ó¯P©PÉå)§Òä¢q2Ñ(Z¡ˆÛb¥¥¾öÖ6ªG¹õ‡dllŒT.GMSñn‹'M¡+â¬n ®6€´æ¯ © …| O}õµAš‘µË‹K™Î{Ù¾½ƒ(ã¯rSÉ'Y /’É•©íÞÌö=[±Û¬˜DHÅØë»èl’YYd%C5ºÙ´oÝm5·ÙQ¦ª¾‹T&º¸HIr°qï6õ6P*äÁè&ÔÒ€Ù(¡– ™ºæVêÚÚ x-¤—‰Å3¸»Ùýø>‚n v_«Q#¶0O,GvÖ°õ±4ÕûðÔ7áuˆ-΋¥pÕ·³e÷6|6‰|¾„3Ð@}½(æsHV?M­õ ŸmkX<,F‘ØâeÁJó¶ý´6¸(æ‹8ü¬&#PU^V‡ I˓ΖñÔ†hïíÆi…èÂñxK[woÁaÒ(ʸ«CÔT{ióyÌž uõU(¥v ~Jù<’ÍOs[.»™R.MY´PÛÒAWwJY–‹ä*Ú¶Ó×׌ («Fê›[pÚh•"ù¢F ¾CBm447a·Ê (5‚m„:Z±U‘%ÑBM—Hx™Îm{¨òÜ}JÓ4"³s4èî¾3ß• ­‡4÷blSUR©Ñh”D:M>—£T)£ H"êÚêNDX * ¬6.‡ŸÏ‡ÓáÐ÷¿u94M#—Ï“N¥He2¤3òù<år™²RAY edIÂ`0`4™qØlØm6œN'N§sÕ‚¢ó½qèÐ!f——Ø¿ƒQwÆ×¹¿d—'¸pn’¦Í;¨ö™:þzš_þ¯ÿ…ÏtWUU¹4Jée"˹ôua¢££££s_'ËØÔþò»?1N'wP )F†N²Ï}íg›¹t„¿üámV’…{ÿ•Räâ[à¿ÿ_ÿ•S'/‘I,3ÉÕ¡“¬$óh•"WOæèÑs”ÕN™è[9:::::k3°F©£P,#"F‹ £,!ÙT ÙbC-æ)UT F3ëg§.”R|¾ˆ&HT*%RÉÔ]#ÝhªB!—£\QDf› ƒ´ÁUUJä³yUC’MX¬fDÔb†LÙ„I(RT$ ‘qN>F¯#ˆÃь٠ ©erÙ<Š¢a0™±˜M«GU5r1G¡PF0QK9’Éü¼¿J!—¥TVD ³Õ†lÈÇ»4‚«e'zH…¯ññû'Øî¯Ãf­Ç( ¨J‰\6ª‚l¶`6b."X œ£"ZpØä›Žáå|–’*a³™r6E¾bÀá´!PÉg)¨6«‘âÚs‰’ŒÕnE$‹Þ»Ñ,µHÂjÙæÒ Ìv•|ÉdµD2‘BU²©$Š &›£,ã“޳ÑUƒ£³‘Îí83d²©$F‹J1Oùnu^Ì“Ë$‹ÕH6SÀît ~K¿w]˜èèèè耦±2~£œ –*‚(âªmãñçŒK]äý?ÿ•’À*åIÄ(7{^|‰Öj;•ì2G_ƒ‰¹8&§—9KE½Û¼_æÆÉ8}n”B©¢LMçfz7²’âÌ»‡¹1OEƒÑNÇÎýlßÒÂõã¯qêZ¯1GIôaWÃ\!ï>‚l”é 99ýÎÛŒN-R.k~{’žöjò‘ 
>øë;,%ŠØ¼Ìêšæ¹Ëë«ÌžûˆC—È+ˆ’L°}€½{70qú4W¯Í¢•ý\ü°‚exdíÑä§iõ œ:ôsQEÃì²ýégh©ùøÐ+̬ȘÔîöA|ãj–­Ìä©÷9· òãŸ=‹]ÈqôÿÊxÖÉOÿé7øÌ çßyEªhª¸zñÙ| D3¡Ý îéÇPŠráè(Õ{xr£Gqòôu0Û© zɦâ{ž ¿ 4µÈµ£ïp6±D<‘Ãߺ‰û{9u”+WG(ùŽ`öS™<Æp,D»•÷^ù+eC)O2G•½ìyáEZªíT²>üËëŒÏűzüø½2‹…ý/¾DKÝ· ˜¨oåèèèèè U2Ì¡:yü—¿âéí"{åC>>w‘ÄÜU"‰­;ç™çžÄ˜¾Á™Ó×Q5±“Gøx$Ŧ§ŸãÉ'·# $“w†%/Fg9ýÑIÌ­Ûxîï~Ãþ==ä—YXÊ0yòGNNÓ¾çi^øÍßÐUgàèëo3±˜C+F¾4£yûŸÿ ¶öRÛØÌæÁý´7WsãÈ|øÉûòüß>Oƒ-Á‡o¾ÏR<Å¥#ï0²bcÏK¿dߎv´Ø2…âªI+F™¿£¡‡Ÿüú×ìÞÙÃÌÇpm.CÇ–-´47Ð:°ƒÁƒéÞØKm¨•m{÷Ñ\çâêoòñHŽ O¿Äó¿x–*qž#o&žWÈÇf¸1§aÓ6oéþì´Š`Àá³™˜"žÈSÉ,1½fq.Lx9‡’3=9I"æä‘S¸Û6rð7¿bsogÞ}›áÉ(éè"©t‰ôüÇŽœ¤~Ãn^øÅAÜ&…+Ÿ\![P(¥–(Z\ìxþÜÈÄ'Ç™J˜õtRÛØÌ–½ûik®¡œŽ°´œCEâ³WYIæhßùÏ|Cb”³gFQ5ÑðÉ[~ü"öö“_˜äÚÈÜ}Iª ÙNçÎýlìm$žaq)EYÓ¯dP5 D+Õ-Ý´µÖá«m &h%º§¢–X¸qWS;==­Ô´öÒ×׆Ëz—éE1he‹ó,­¤ tíâÙ¿ù ¡™¹ëÃØBlè!PÓ@ßîØK—£‚€¯ÊǦ]TœXDAÂáõbÔrŒ]¹‚)à§Ê!R(‹xkê(,M±07ÇüÄ }ýt·ÔRß»ÖÖfÌw‰3'˜üô?öcÚC>"ÓSÄc)”b†H<‡ÉéÆ$ ˜œ\.3v§ A0àôz In _ÇQSEÀªRPd<Á3Ä’YÁ@0ÔȦí}øÜ·pU·à’„Wâ¤æfP, ôÖYœ “‰FH¤5ärŒh΀¯*H1—ÇjÀ-&™›˜¡ò©ˆ¦›™%!øéݳ _Mˆ­;6Rëù,μÑꥥ+-¡:·ôáv*dY¬Nûê»x<˜%ásÕe%ØÒCkK-¾ÚFª«-¬,Æ©¨EF¯ãm餷·†žÍô÷µã4ߟØEúVŽŽŽŽŽZ9ÍÙC¯röZœê¦FÜv™r¹„T^]q#ÉX,¶µ‰U@”E´l´™t s•ýf8{‹ÃŠd¼3®ÉbÿÏ~Ê©ã§8òòoÁ`¥mÓžøÑNR©æFçZæ`0˜í˜Ä ©| ™½˜î’µVSK$“y–*S¼÷çðêß4³ÛA¥“ 2Z IDAT”%›Sqyìk¡Ñl.3‚¤ÜqµáÄ›dbª° …µà›ê—–›ªIÆ3Ìç¯óöÇÖž©‚ÍïAQ$Ì7Ò]æl³ËG°ÆÆüÄiS°‡îà Ï̱Dš¢£šjs‘Äâ(gßyõ¦¨)hFLFõ‡W•\¾ˆh0b–W3@Ë6×g飇ÕÄjVfQVç—!ÉX­¶›Yž%Y‚BeµÎ3,õ޵÷px]L÷GRèÂDGGGG‡ÔÒ—/LÐõü?òÄöVÔÜ ±áI~Õä%ÈX-Š™ìj6] r©,J±rõSÁ^×ÍÁØ@!gòâï½þþP3‡‘™dzÍ P)d(ª"¶Oƒ` wà'ˆF\N3Í5;øû_ì[6j…bIÅ ¥™2 äS«Ç_A#È£)Ÿ›ú4å±QÎ]Xæ™ÿò¿±©ÉMváÓ—/}ɱÙÕ ¢dÂåuÐÙ³Ÿ_>»uM'”)–Á"—™áÑNC} 3×®0!ĩݾ›j·åü%FÓEüõ½ÔšÂø[{ùÉ?ÿ#Õh…\³ÝXŠ®]IÄl’Q•2¥ÊêW2)ré·Fùb{Æ—¥äîZç³ÈR6·vìZ#›H£”*÷¥-ê[9:::::ˆ‚ˆ$‰”ri’+‹\>y’•xœRl‰LQùab¤º­•Ää ®_Ÿbqb„áá1’ù;'»dx’#¯¼ÆÅË”T ·ß‡lP4™ºÎnò3c\# sõãÓlµÔ×î¼¥$#¨y–æÃdËF»»‰Ž sõú,éD”«'óÑáOÈ”dªC5Ì^¹ÌéæFÎ2>1M¡|ç³$ •l"I|ašá󉥳dW"äÊ·[0Ñ”{AÄSWµáÚé Ä—¸pî ñüWW¼dDP ,…Ãdòʽ5ÁDu[ ñÉŒO̲86ÌðÕ1R…ûsÄX·˜èèèèè`¯j`ó®^Î9ÌëWíT…ZÙþô“ ]åò6·Ë-¦z£Í‹ "m»ö³iáuNýõ˜ì¼/­í |ÎÜàð zD.y‹ó*ˆ’ú]ÓÛUMs°7]âÚÑ·Ö@29ØûÜi Z5yðûl7×î6O-Í!7?z“É–ÇBªxˆËï¿ÎyEE2ÙèØq—ׯǞ"òÚ!ÿþ·X=ìÁêlîÛíx[ºÙºí2×Þÿ3Sµ­íìØ»™‘± Üèðb÷@^ý•Óß@¨ÚÄÙ>Àh|Žþ'Ÿ%[y—3oý‰ƒÅEÏàl³Ã‡S6}¡5Âîo¤Þã bª!°!ÉÕÁZâˆ44øðû‚<ödŠ3ç?æÕóÇA4hÝ@U•A‹csûQ¬2®†vìçÜ©÷˜v 8¨ªu"Š"‚dÆðÞvŒ×í¯ÂlqxëhjppáÈ¥§1X½øÖ|Sœ¾–[¶äŒ6/nÉ ‚@Ç®ýLͽαWþ€ÓÄiwârøö~&LvaG‘¯“]X-I§Ò¨ˆX.Œ’B2–DvzP³Id›ë¦8)eãä*fÜ®ÕI¬œÏ’ÍæA”±9Ìd’9œ>Ͼj¥D6¡\Q%V§Ù°ê¡)%2© eEC6[°Û¬Âj´ÖDNÆã±­ùŠh3)²ù2&§›Iú췃ɊݱëM£˜M¯ÆÛÍ8l"É”‚Ûë¼=Þ†¦¡”r¤Óy4Ñ€ÝéDP ¤R9,n/j*Šæða7Š€F>• _T°¸¼XŒ"j¥@&•¥¢¨Èf6»Q(¤c”°át|‘8ÑÈÅ£”0áö8($cäÊnŸ QXõ™É§Ó«ñe$»Ë‰,­>G6C“Xä2sãÓ¨f.»1æåÿö:þGö IÄs¸}ž›ïœˆ®`qz0É"…t’\¡‚ÙéAVR¤‹$Q@-ÄYŠkøÝFbѪ ᪠b‘EJ+SŒœ=MÚÑA°!HmÀ½ö”w›÷âo ·¿§ÙáÆí¶#|‡mM&:::::_¹ºžºr¨àe’ éùa>úë óËdReúôSöö’™»Ê±wŽÍ”QÊ*ªìfÇÁçÙÐægéú|í^·Å\Å/üˆÂø)NœºLEÓÐGu{Ÿ}†Ú[N¹£a¦®žf|2ŒåüÔVj9Ù÷¡f#¤ÓÌ5Ý<û‹ºŒÄ&¯pô½‰e+¨eUö°ëùèkñÝ!NT¥À'‡ÞB*®/–0ù[xâÅç¨sÕ#ïqæòšªPÕ1ÀcÏìG]ºÎûo&‘« ¨ š`¤÷±ç©*Œ2°‘vƒJ*2Kmÿìߌ]Hr"» ¦ÍPíì|v?͵.¦Oæ­£ÇéÜ6€Q+,Óà)¶´þˆß½ü ³;÷ÐÞÛG¨é†–=ìܱaM”|F.¦¢” Ÿc>-sð7¿¡Ê¢0~é,…JUã®ùîº‰ŽŽŽŽÎ—ÛK )–“Y¼þjŒŸÎF‚DU}½=M8Ü~jCŠ™ ™ø2Sc³X™¼ÁøTÁÈÊä‰\ ³ÍCSW§Øì “‹9¡Ìôµ–SZ™ðÄ4åV“Å‚ˆÙjE@¶ziîéÇïuRÝ҄îQÈäÈ'£LŽÍ#HË“£LŒÏ¡‰F–ÇÇIäï /Ênšz6ðº©jë¦!è$žfftŒHº‚’O1>|•X¶‚TIž]@vù0¢\<~œ‰éµ=ÛØ:ЊÑbEEd“³éöÀa‚d£­¿›èø(±t‘td–¹H‰öÞv¬6-}=hÉyÎ?ÁèréÉTqÕŠ`tÑÐÙƒ×íZ ·†ÍSMKgéÙQÎbv~™|j™L¦ X~šÚZ°ÛløêkÄ…BÑhÆ ŒfLFãJ«7€˜_áâñãLήPß·ƒÍ{Ýb¢££££ó½¢”V“ÃÍÖÏ¢¥ "²ù³ì²’,!h*¥L.“§˜Za|$·öe~ k“™(Û°e@£T*QJ'IÌS‰¬Z2Ìþª[kо<±/‚Á‚Õ$³š1W@WO”(J™|6‡’\f|$»öe7]›w_åËl6óšaCÂ(K”+Erù¥\Šå©Qk/îoj§¦ÚEÃÆM~{˜²h¥{ïAöovÉ 4tö`?|™‰é¾åÊö:šCA2 £|ðê«h¾6Z[±J%Œñ³2-ØÌw­[>Í{o|„«©‡Pc5¦b YXÝ’ù´ŒLi­ÊV#ìª÷ìå+ÚþÏìŒ\¹Æ©·®P‘ìôî?ÈÞíÍ÷%ü¼.Lttttt¾6¢AB’@)—V³ôÞbŸcŽ—-xün,wóÒ“ ¥”%_6`·™ˆ¬¦Û]óó±ÙlØA6ýèçlht*ùtÁdÇl¼Ëħi_š @6Zpûݸöòüc}kÏ!_1b·ÞÅ:PÊHdšR$“/b 8ñz5\Á»_ú^š¦’Ž'±8”2qªº¶Óºe¹ø'ß~3 ±©ïG€†ªÝý)ÍUÍ´5:˜º|𥸠Uíð;$¦/O07ð›ÿô šÝVF†8¡ 
Üš5æŽÒÐJ„Ç®‘6VóâO_$`éÊŠvo'Œ4¾¢,5réÕ=;iÛº‡ll‘¡7_å̱ٴ¹ —¬ ÉdÃm1“L¬PÑ:ø²Œ;f·Ÿ¶î&N ã”Ë@ƒOäÒ©ãècß¾mwX<Ímt7™¸òÁ›˜w ħ9sf”¶Á§Ù²¡ù6 ‡([Q+&®ß dûâfOí]!>>q”"unK§Ž#Ö°op ¶[|L4 þÿöî´9®êÎãø÷®½ª»%uk³¤¶d-–-[²ñÎb;l¡2 3CªæñÌ‹àUdj¦¦RS™„I 0‚7l#É’e[û¾[kï}ïmË’7ˆ –ÿ§Jºûö¹›¤ó»çž{Ž•Yd¤ó —6™¡ú&34í©¥²$F¸¥‹ Ÿ|„ÝÔÈR_-­ì|ú0öðyÚ,¶x˜|3M"Å—ïG×4\Lô18REuyxímÕCõæ´¼q ÅðóäÓU节ÇëFM&èïìAÍ›§§«‹åTŠÙ¡A–ƒw І/‡aàJ/ó™úGIX£}ƒòîÑä¤h¸t˜ígpdÕáÛoÏXqz?;EÇlÛ¿“ž"™±ð‡|+­_ßÈïÛ«¯¾úªüÙ !Ä÷SOO‹ñ8¥ë£¨šv_e(šIrº—+ƒKÔmÙŒG·˜™˜o1µõh ,ÎL°”2©nØHu] j|Š«m­\½Ôâ+aû}D.b ÓÌ/C´¶–€ßD1üD£%ÌŽôÓùy+ƒÃ3„«izx cm7H·Kcirˆþ 4—C÷P±¡Žü '½Èäø5Beµ”¯‹PBl’«m­twõ ÊhÚ¿B¿¹æi’Ôâ43q7.áËmô N³~û>öìßJ^~1eE~Æz¯Ðq¡Ñ©•[w²½©‚ ‡k£½t]ø‚žË½¨yeì{ú)J Œ1Ôs•¥´IY´÷-ûáq«Ìôc¬ÛÌÎÝ[ñ˜*Þü0V|–¾ŽvÆgcmÚCM±ÎäØ$¦× ŽÁº õ†ÜØ©E¦&æ–Õ±is±éQº;/q-ëw"b,3=·Œéó (ªêjÉóXñkLN-SRµ‘âH>¤c ÷\e!eP­Äcæ¶ÓNÎ399OA´‘º afGzéºÐJï•>Ô`9{Ÿ~‚â<㾟Êq‡©átÐnù½”Ù……â{ìo™]ø^úZxýµ¿²ù¹Ÿ³ç¡õ8Ù4Y'×À±³d2†iäÆ+±,,+‹ã€ªéhšŠ¢(8¶E&c¯,·rqžÍ`Y6¹þ*:šz§g3²™ ¶í é:VÖÂ0Í•r2©ªa¢]ïr·mX[IÚdÒY S'›Îàܲ~Çq°²¹u¢(膱2k±meÉZöõð¦¡ëzn–dÛ"“É¢¨º¡ßᆗC&ÆQ4ŒŸ;¶m‘ÍZ¹þ;†ŽcYd- Íб2«÷5÷}U3ÑT°¬,–eçÖ§kØÙ – º®’ÍÞ<'¹}Í ›&êʹ¸};'W¾n˜¹qR®—ŸÛO]רw‰Ì.,„â¢[iÚÞCçÙÓ”—Qñ®™]XQuÌU}3UM»c ¢j˜®Ûß×tíKk$ݸ¹Ö[¯´ ×ÚΡwÛ†5%**¦Ë¼ã÷sŸ¯]çªPuóÛ|·}\½†éº½ÈC±N’ñ¡[†íÇJ1:0@ƾǹ¹m?5û<××ʧoÌb2³öx9Ë£í|ôæÌHOüôÇ”zi?×ʒ奪®†Ôàœ8ÑŃ<ð+-&B!¾Tr¦ŸÏŽŸaxlT“ÒÚÍì>¸¿®`-qúã Ïâ ^_OóÞ팶|Ì™ÏÛ(U¼¨™ÇÙ±£qå ÜÎ$H«&ë6ÔP }Å­°j;Ë©“§˜²"x‚ jÂÆ±œ:ò'Ɔp|a¶=z˜M5Å(8,^âÜé LÎ, ¦y;¶¯¸Í±ï¸À¹Ž!öüäŠ} WO~@ûÕšŸyžê?ã­'8ß“d×3™ºpŒ®KÝ$Òþ‚R6ï9@}4@ç©£ôÏš˜©)ì¼ ìÝ[Çå3'é˜ kAh]5͇g¹ó$'N· •-árÙ<¼§¿ù͵¤Æ8wìCSØŠNqu»?FЄÁÖÓ´µ^ba)…á R»muIÎ}ü>3øÞ~Ÿ=§ª8p=fèimc.¿‰—~ò$~¢ë£Ì.AЫ¡xªÙÒPÉÑóçÙ¶§"Ïý5I‹‰BצdtðÆÏ0±ë%Nfž“oÿ—‡â4ì~„­QÏ}À±Z°‹‹¥µgM{¡¹¹Ž…á«t÷NQ-¦¨¬Œhý6jê7p³ußÁJ%±œ,ÝŸŸåÃ7ÞࣣŸÐ=0}Ï m…âòR"e¥”×4°ys-A—ÃpWËiÛöï»ÜDZN0Ë’˜àèf6ã¦ùÑǨ©œzçM:z§n¶ß(*¡ƒÁ‹íŒ/ádb \n§£ã2CcsØVšáÞKŒÍeXlý÷ßm!¯r »Ù;6ÈÑ7Þet.Mr¶›OÞ?GJPÓPÃDë ZZˆnÛË®}Ídfú¹Ôz™’ò%åeTÔn¡¾¡öæìÃ÷Åffdèæ9d|dâæ¾e—8÷Þ[\èšaÃŽý47×3Õþ¼}Œ¥‰^N¾û!©@»=NeX¥ëB+×ôR‚Å¥WVÑøP#%þ›k˦¸6<ˆ7hpñ/oó§_ÿš>üŒ*ª¢ ¨:ë·mŘ½J_ß÷Ûh"-&BñƒgÑßv–?\Z5È–ÅTÏ(u»`yb„ËWfØü³ ©9ŠbÕ›èál{Ë·‘M&ÈàÁíÏ£¼¬šÊ†m8º3£aêáò(á ÷–Õüj‚ÅÙIü¥Ìö_¤õ|Oüâe¶ÔßåvŽ‚+&ç!ë-¢´,„¢®¬`ßsO‘ïRp-õpõø,ÉdŠ…«é›°9ü£Ý—x) 6Ñåwô]îgSuCUreV².FGFÙt3·¬PS]µÁa’õ^Ʀ)ß±‡ÑŽ#¸£›Ùÿø^òÜ*a_šîÿ|ñÉk¨ŠJ¨¸=Ï>AÄ£ÐÕ"ÏKqtµu¤,7yî^·I ¨ŒâHõrI|f”³ï¾EǪpcgcÌgrU{bv‚+]#Tü%»voBq2X×ú9r¦“™‡ŠH%âx y…ÅTT½@Cò‚Á›ÁÄÌb¨4Í@SlÒ©Lî6c“N&AÉ mîŽTóä?U°8;ÃhïeÎ}zœXFç™ÇÖqçùk4ÓÀëÉÍ]ã „ñû½L¦_r;çNÙäæu¹²ª5MƒPE-{ú2¥þ\u—Ídpòñ®>ŠÎºê:>;r…^Ÿ‰§leU%´Ÿ?Fo›âPZbÈ­1O®lŸ•I’± ]½^Œ±²~3XÊ—þ‘¦ùkLöðùñO9¶á/ïÉeˆ[ŽŠcÛ¤SIâ±Ø‚I†L6sÇ$O @^0¸Lby7o½hªŽ¡A:‘¾¾6‡T2£˜èºNùǨز‹Ù‰1º[Ïrì­wðFð\/ëVªf(.@‰i×ƒŽ‚ÇŸ‡ê¤I[Ö×÷»(ŽB!î%¯ LIH§ÿbËü(ñIúúF(Þð0=Açñã$ƒÕ4n®¤>ègâj'c×I;åhX,-.Jgq»nÌ c1ÑÕ‡mç¡§^¤a}> m OĨl,ÃpRŒ\éC+¬¤$ì¿å¢[AUT’‰eâñÔ=ÒŠN¤²¿ÕÁÔØUÛד]ž¢«ý e; øÜ+·– +«ð¨Çù¼ÝMÓÓ“_¢Ð»À…‹S”m:LÐ㥼¾†Î¶«ŒŒoc]ÉÐ¥NÒî0e!–‡V… +Io˦2AjÚHÕ–‹ƒ]œé›'i¦8ÄcK$’iüÐL?[=ÌÖ¯ñ¼yƒù”ùî¼Ä\m—µH÷å «ë1‡8y¼š‡¦x}--AgpêÈÕIDATÛ{¤ ¼ªm%XZŽòWW 7µõ|ön'»·³±ÔCOG'Ž¿˜HÀ—Û÷l‚¹e›üç¾…–`"„?ôÖÓ…®©wyŒP;äÌéó¼ù__à86Š;Ê®ý»0 šjÓ~ìm.ž0Ðp°UÛk à˧ª¦˜/>~‹Gö=”» ¢hmØHÅå^Nüßo8åX(N–¢Æ]ìl®Ç^žáì‘·0›^äÙÇ7­Íš›²ÊrZ?<Ã{ï(ä[`¬ÐKÕ1]¹–‹üª­ìÞ3ÆùãGè=£â86îp%•M·Ʀ ‡Ê(Ï÷04gQ {ÝDŠÂ´\ê¡¢¶]UiØÿ(ã3G8öúoPU Ë‚í‡R½.ÈÅvóúÄ9ŠjbÐwú#.·CSÀ¶lšöì#äÍ£²ºœã-ó±©sðÐnÞû©ŠU —y[•óÜ( hy%4<ÈÉcgøó¯¯àØY¥˜=Ï API Conductor API API Conductor Conductor Scheduler Scheduler Scheduler DB Compute Compute Compute Keystone Network Glance & Cinder oslo.messaging DB HTTP Nova service External service Hypervisor Nova-Networking API Conductor API API Conductor Conductor Scheduler Scheduler Scheduler DB Compute Compute Compute Keystone Glance & Cinder Hypervisor Neutron Neutron nova-13.1.4/doc/source/notifications.rst0000664000567000056710000002705613064447152021374 0ustar jenkinsjenkins00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Notifications in Nova ===================== Similarly to other OpenStack services Nova emits notifications to the message bus with the Notifier class provided by oslo.messaging [1]_. From the notification consumer point of view a notification consists of two parts: an envelope with a fixed structure defined by oslo.messaging and a payload defined by the service emitting the notification. The envelope format is the following:: { "priority": , "event_type": , "timestamp": , "publisher_id": , "message_id": , "payload": } There are two types of notifications in Nova: legacy notifications which have an unversioned payload and newer notifications which have a versioned payload. Unversioned notifications ------------------------- Nova code uses the nova.rpc.get_notifier call to get a configured oslo.messaging Notifier object and it uses the oslo provided functions on the Notifier object to emit notifications. The configuration of the returned Notifier object depends on the parameters of the get_notifier call and the value of the oslo.messaging configuration options `driver` and `topics`. There are notification configuration options in Nova which are specific for certain notification types like `notify_on_state_change`, `notify_api_faults`, `default_notification_level`, etc. The structure of the payload of the unversioned notifications is defined in the code that emits the notification and no documentation or enforced backward compatibility contract exists for that format. Versioned notifications ----------------------- The versioned notification concept is created to fix the shortcomings of the unversioned notifications. The envelope structure of the emitted notification is the same as in the unversioned notification case as it is provided by oslo.messaging. However the payload is not a free form dictionary but a serialized oslo versionedobject [2]_. .. _service.update: For example the wire format of the `service.update` notification looks like the following:: { "priority":"INFO", "payload":{ "nova_object.namespace":"nova", "nova_object.name":"ServiceStatusPayload", "nova_object.version":"1.0", "nova_object.data":{ "host":"host1", "disabled":false, "last_seen_up":null, "binary":"nova-compute", "topic":"compute", "disabled_reason":null, "report_count":1, "forced_down":false, "version":2 } }, "event_type":"service.update", "publisher_id":"nova-compute:host1" } The serialized oslo versionedobject as a payload provides a version number to the consumer so the consumer can detect if the structure of the payload is changed. Nova provides the following contract regarding the versioned notification payload: * the payload version defined by the `the nova_object.version` field of the payload will be increased if and only if the syntax or the semantics of the `nova_object.data` field of the payload is changed. 
* a minor version bump indicates a backward compatible change which means that only new fields are added to the payload so a well written consumer can still consume the new payload without any change. * a major version bump indicates a backward incompatible change of the payload which can mean removed fields, type change, etc in the payload. There is a Nova configuration parameter `notification_format` that can be used to specify which notifications are emitted by Nova. The possible values are `unversioned`, `versioned`, `both` and the default value is `both`. How to add a new versioned notification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To support the above contract from the Nova code every versioned notification is modeled with oslo versionedobjects. Every versioned notification class shall inherit from the `nova.objects.notification.NotificationBase` which already defines three mandatory fields of the notification `event_type`, `publisher_id` and `priority`. The new notification class shall add a new field `payload` with an appropriate payload type. The payload object of the notifications shall inherit from the `nova.objects.notification.NotificationPayloadBase` class and shall define the fields of the payload as versionedobject fields. The base classes are described in [3]_. The following code example defines the necessary model classes for a new notification `myobject.update`:: @notification.notification_sample('myobject-update.json') @base.NovaObjectRegistry.register class MyObjectNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('MyObjectUpdatePayload') } @base.NovaObjectRegistry.register class MyObjectUpdatePayload(notification.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'some_data': fields.StringField(), 'another_data': fields.StringField(), } After that the notification can be populated and emitted with the following code:: payload = MyObjectUpdatePayload(some_data="foo", another_data="bar") MyObjectNotification( publisher=notification.NotificationPublisher.from_service_obj( ), event_type=notification.EventType( object='myobject', action=fields.NotificationAction.UPDATE), priority=fields.NotificationPriority.INFO, payload=payload).emit(context) The above code will generate the following notification on the wire:: { "priority":"INFO", "payload":{ "nova_object.namespace":"nova", "nova_object.name":"MyObjectUpdatePayload", "nova_object.version":"1.0", "nova_object.data":{ "some_data":"foo", "another_data":"bar", } }, "event_type":"myobject.update", "publisher_id":":" } There is a possibility to reuse an existing versionedobject as notification payload by adding a `SCHEMA` field for the payload class that defines a mapping between the fields of existing objects and the fields of the new payload object. 
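To make the versioning contract concrete from the consumer side, the following is a minimal, illustrative sketch; it is not part of Nova, and the helper name, the `SUPPORTED_MAJOR` constant and the field list are invented for the example. It shows how a consumer of the `myobject.update` notification emitted above might check `nova_object.version` before reading the payload::

    # Hypothetical consumer-side helper (not part of Nova). It assumes the
    # versioned payload has already been extracted from the oslo.messaging
    # envelope (the "payload" key of the envelope format shown earlier).

    SUPPORTED_MAJOR = 1   # the payload major version this consumer understands
    KNOWN_FIELDS = ('some_data', 'another_data')


    def handle_myobject_update(payload):
        """Apply the major/minor version rules to a myobject.update payload."""
        major, minor = (int(p) for p in payload['nova_object.version'].split('.'))

        if major != SUPPORTED_MAJOR:
            # A major bump is backward incompatible (fields may be removed or
            # retyped), so refuse to guess what the data means.
            raise ValueError('unsupported payload version %d.%d' % (major, minor))

        data = payload['nova_object.data']
        # A minor bump only adds fields, so reading the fields we already
        # know about stays safe even when minor > 0.
        return {field: data.get(field) for field in KNOWN_FIELDS}


    sample = {
        'nova_object.namespace': 'nova',
        'nova_object.name': 'MyObjectUpdatePayload',
        'nova_object.version': '1.0',
        'nova_object.data': {'some_data': 'foo', 'another_data': 'bar'},
    }
    print(handle_myobject_update(sample))

Because minor bumps are additive, such a consumer keeps working when new fields appear, and only a major bump forces a code change. The `SCHEMA` based reuse of existing objects mentioned above is shown next.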
For example the service.status notification reuses the existing `nova.objects.service.Service` object when defines the notification's payload:: @notification.notification_sample('service-update.json') @base.NovaObjectRegistry.register class ServiceStatusNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ServiceStatusPayload') } @base.NovaObjectRegistry.register class ServiceStatusPayload(notification.NotificationPayloadBase): SCHEMA = { 'host': ('service', 'host'), 'binary': ('service', 'binary'), 'topic': ('service', 'topic'), 'report_count': ('service', 'report_count'), 'disabled': ('service', 'disabled'), 'disabled_reason': ('service', 'disabled_reason'), 'availability_zone': ('service', 'availability_zone'), 'last_seen_up': ('service', 'last_seen_up'), 'forced_down': ('service', 'forced_down'), 'version': ('service', 'version') } # Version 1.0: Initial version VERSION = '1.0' fields = { 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(), 'disabled': fields.BooleanField(), 'disabled_reason': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'last_seen_up': fields.DateTimeField(nullable=True), 'forced_down': fields.BooleanField(), 'version': fields.IntegerField(), } def populate_schema(self, service): super(ServiceStatusPayload, self).populate_schema(service=service) If the `SCHEMA` field is defined then the payload object needs to be populated with the `populate_schema` call before it can be emitted:: payload = ServiceStatusPayload() payload.populate_schema(service=) ServiceStatusNotification( publisher=notification.NotificationPublisher.from_service_obj( ), event_type=notification.EventType( object='service', action=fields.NotificationAction.UPDATE), priority=fields.NotificationPriority.INFO, payload=payload).emit(context) The above code will emit the :ref:`already shown notification` on the wire. Every item in the `SCHEMA` has the syntax of:: : (, ) The mapping defined in the `SCHEMA` field has the following semantics. When the `populate_schema` function is called the content of the `SCHEMA` field is enumerated and the value of the field of the pointed parameter object is copied to the requested payload field. So in the above example the `host` field of the payload object is populated from the value of the `host` field of the `service` object that is passed as a parameter to the `populate_schema` call. A notification payload object can reuse fields from multiple existing objects. Also a notification can have both new and reused fields in its payload. Note that the notification's publisher instance can be created two different ways. It can be created by instantiating the `NotificationPublisher` object with a `host` and a `binary` string parameter or it can be generated from a `Service` object by calling `NotificationPublisher.from_service_obj` function. Versioned notifications shall have a sample file stored under `doc/sample_notifications` directory and the notification object shall be decorated with the `notification_sample` decorator. For example the `service.update` notification has a sample file stored in `doc/sample_notifications/service-update.json` and the ServiceUpdateNotification class is decorated accordingly. Existing versioned notifications ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versioned_notifications:: .. 
[1] http://docs.openstack.org/developer/oslo.messaging/notifier.html .. [2] http://docs.openstack.org/developer/oslo.versionedobjects .. [3] http://docs.openstack.org/developer/nova/devref/api/nova.objects.notification.html nova-13.1.4/doc/source/api_plugins.rst0000664000567000056710000001566113064447152021034 0ustar jenkinsjenkins00000000000000API Plugins =========== Background ---------- Nova has two API plugin frameworks, one for the original V2 API and one for what we call V2.1 which also supports V2.1 microversions. The V2.1 API acts from a REST API user point of view in an identical way to the original V2 API. V2.1 is implemented in the same framework as microversions, with the version requested being 2.1. The V2 API is now frozen and with the exception of significant bugs no change should be made to the V2 API code. API changes should only be made through V2.1 microversions. This document covers how to write plugins for the v2.1 framework. A `microversions specific document `_ covers the details around what is required for the microversions part. It does not cover V2 plugins which should no longer be developed. There may still be references to a v3 API both in comments and in the directory path of relevant files. This is because v2.1 first started out being called v3 rather than v2.1. Where you see references to v3 you can treat it as a reference to v2.1 with or without microversions support. The original V2 API plugins live in ``nova/api/openstack/compute/legacy_v2`` and the V2.1 plugins live in ``nova/api/openstack/compute``. Note that any change to the Nova API to be merged will first require a spec be approved first. See `here `_ for the appropriate repository. For guidance on the design of the API please refer to the `OpenStack API WG `_ Basic plugin structure ---------------------- A very basic skeleton of a v2.1 plugin can be seen `here in the unittests `_. An annotated version below:: """Basic Test Extension""" from nova.api.openstack import extensions from nova.api.openstack import wsgi ALIAS = 'test-basic' # ALIAS needs to be unique and should be of the format # ^[a-z]+[a-z\-]*[a-z]$ class BasicController(wsgi.Controller): # Define support for GET on a collection def index(self, req): data = {'param': 'val'} return data # Defining a method implements the following API responses: # delete -> DELETE # update -> PUT # create -> POST # show -> GET # If a method is not definied a request to it will be a 404 response # It is also possible to define support for further responses # See `servers.py `_. class Basic(extensions.V3APIExtensionBase): """Basic Test Extension.""" name = "BasicTest" alias = ALIAS version = 1 # Both get_resources and get_controller_extensions must always # be definied by can return an empty array def get_resources(self): resource = extensions.ResourceExtension('test', BasicController()) return [resource] def get_controller_extensions(self): return [] All of these plugin files should live in the ``nova/api/openstack/compute`` directory. Policy ~~~~~~ Policy (permission) is defined ``etc/nova/policy.json``. Implementation of policy is changing a bit at the moment. Will add more to this document or reference another one in the future. Note that a 'discoverable' policy needs to be added for each plugin that you wish to appear in the ``/extension`` output. Also look at the authorize call in plugins currently merged. 
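As a hedged illustration (the exact rule strings are hypothetical; they simply follow the naming pattern of the in-tree plugins and must match whatever your plugin's authorizer looks up), the entries added to ``etc/nova/policy.json`` for the ``test-basic`` skeleton above could look like::

    {
        "os_compute_api:test-basic": "rule:admin_or_owner",
        "os_compute_api:test-basic:discoverable": "@"
    }

The ``"@"`` rule always passes, which is what lets the extension appear in the ``/extension`` output, while the first rule controls who may call the API itself. In the controller that first rule is typically enforced through an authorizer obtained from the extensions module (for example ``authorize = extensions.os_compute_authorizer(ALIAS)`` followed by ``authorize(req.environ['nova.context'])``); check the plugins currently merged for the exact pattern used in this release.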
Modularity ~~~~~~~~~~ The Nova REST API is separated into different plugins in the directory 'nova/api/openstack/compute/' Because microversions are supported in the Nova REST API, the API can be extended without any new plugin. But for code readability, the Nova REST API code still needs modularity. Here are rules for how to separate modules: * You are adding a new resource The new resource should be in standalone module. There isn't any reason to put different resources in a single module. * Add sub-resource for existing resource To prevent an existing resource module becoming over-inflated, the sub-resource should be implemented in a separate module. * Add extended attributes for existing resource In normally, the extended attributes is part of existing resource's data model too. So this can be added into existing resource module directly and lightly. To avoid namespace complexity, we should avoid to add extended attributes in existing extended models. New extended attributes needn't any namespace prefix anymore. JSON-Schema ~~~~~~~~~~~ The v2.1 API validates a REST request body with JSON-Schema library. Valid body formats are defined with JSON-Schema in the directory 'nova/api/openstack/compute/schemas'. Each definition is used at the corresponding method with the ``validation.schema`` decorator like:: @validation.schema(schema.update_something) def update(self, req, id, body): .... Nova supports the extension of JSON-Schema definitions based on the loaded API extensions for some APIs. Stevedore library tries to find specific name methods which return additional parameters and extends them to the original JSON-Schema definitions. The following are the combinations of extensible API and method name which returns additional parameters: * Create a server API - get_server_create_schema() * Update a server API - get_server_update_schema() * Rebuild a server API - get_server_rebuild_schema() * Resize a server API - get_server_resize_schema() For example, keypairs extension(Keypairs class) contains the method get_server_create_schema() which returns:: { 'key_name': parameter_types.name, } then the parameter key_name is allowed on Create a server API. Support files ------------- At least one entry needs to made in ``setup.cfg`` for each plugin. An entry point for the plugin must be added to nova.api.v21.extensions even if no resource or controller is added. Other entry points available are * Modify create behaviour (nova.api.v21.extensions.server.create) * Modify rebuild behaviour (nova.api.v21.extensions.server.rebuild) * Modify update behaviour (nova.api.v21.extensions.server.update) * Modify resize behaviour (nova.api.v21.extensions.server.resize) These are essentially hooks into the servers plugin which allow other plugins to modify behaviour without having to modify servers.py. In the past not having this capability led to very large chunks of unrelated code being added to servers.py which was difficult to maintain. Unit Tests ---------- Should write something more here. But you need to have both unit and functional tests. Functional tests and API Samples -------------------------------- Should write something here Commit message tags ------------------- Please ensure you add the ``DocImpact`` tag along with a short description for any API change. nova-13.1.4/doc/source/project_scope.rst0000664000567000056710000003205713064447140021354 0ustar jenkinsjenkins00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Scope of the Nova project ========================== Nova is focusing on doing an awesome job of its core mission. This document aims to clarify that core mission. This is a living document to help record where we agree about what Nova should and should not be doing, and why. Please treat this as a discussion of interesting, and hopefully useful, examples. It is not intended to be an exhaustive policy statement. Mission -------- Our mission statement starts with: To implement services and associated libraries to provide massively scalable, on demand, self service access to compute resources. Our official mission statement also includes the following examples of compute resources: bare metal, virtual machines, and containers. For the full official mission statement see: http://governance.openstack.org/reference/projects/nova.html#mission This document aims to help clarify what the mission statement means. Compute Resources ------------------ Nova is all about access to compute resources. This section looks at the types of compute resource Nova works with. Virtual Servers **************** Nova was originally focused purely on providing access to virtual servers running on a variety of different hypervisors. The majority of users use Nova only to provide access to virtual servers from a single hypervisor; however, it's possible to have a Nova deployment include multiple different types of hypervisors, while at the same time offering containers and bare metal servers. Containers *********** The Nova API is not a good fit for a lot of container use cases. The Magnum project intends to deliver a good container experience built on top of Nova. Nova allows you to use containers in a similar way to how you would use on demand virtual machines. We want to maintain this distinction, so we maintain the integrity and usefulness of the existing Nova API. For example, Nova is not designed to spin up new containers for every Apache request, nor do we plan to control what goes on inside containers. They get the same metadata provided to them as virtual machines, to do with as they see fit. Bare Metal Servers ******************* The Ironic project has been pioneering the idea of treating physical machines in a similar way to on demand virtual machines. Nova's driver is able to allow a multi-tenant cloud style use of Ironic controlled resources. While currently there are operations that are a fundamental part of our virtual machine abstraction that are not yet available in Ironic, such as attaching iSCSI volumes, they do not fundamentally change the semantics of our API, and as such Ironic is a suitable Nova driver. Moreover, that gap is expected to shrink over time. Driver Parity ************** Our goal is for the Nova API to provide a consistent abstraction to access on demand compute resources. We are not aiming to expose all features of all hypervisors.
Where the details of the underlying hypervisor leak through our APIs, we have failed in this goal, and we must work towards better abstractions that are more interoperable. This is one reason why we put so much emphasis on the use of Tempest in third party CI systems. The key tenet of driver parity is that if a feature is supported in a driver, it must feel the same to users, as if they were using any of the other drivers that also support that feature. The one exception is performance: the performance characteristics may differ widely between drivers, but the effect of the API call must be identical. Following on from that, should a feature only be added to one of the drivers, we must make every effort to ensure another driver could be implemented to match that behavior. It's important that drivers support enough features, so the API actually provides a consistent abstraction. For example, being unable to create a server or delete a server would severely undermine that goal. In fact, Nova only ever manages resources it creates. Upgrades --------- Nova is widely used in production. As such we need to respect the needs of our existing users. At the same time we need to evolve the current code base, including both adding and removing features. This section outlines how we expect people to upgrade, and what we do to help existing users that upgrade in the way we expect. Upgrade expectations ********************* Our upgrade plan is to concentrate on upgrades from N-1 to the Nth release. So for someone running Juno, they would have to upgrade to Kilo before upgrading to Liberty. This is designed to balance the need for a smooth upgrade, against having to keep maintaining the compatibility code to make that upgrade possible. We talk about this approach as users consuming the stable branch. In addition, we also support users upgrading from the master branch, technically, between any two commits within the same release cycle. In certain cases, when crossing release boundaries, you must upgrade to the stable branch, before upgrading to the tip of master. This is to support those that are doing some level of "Continuous Deployment" from the tip of master into production. Many of the public cloud providers running OpenStack use this approach so they are able to get access to bug fixes and features they work on into production sooner. This becomes important when you consider reverting a commit that turns out to have been a bad idea. We have to assume any public API change may have already been deployed into production, and as such cannot be reverted. In a similar way, a database migration may have been deployed. Any commit that will affect an upgrade gets the UpgradeImpact tag added to the commit message, so there is no requirement to wait for release notes. Don't break existing users **************************** As a community we are aiming towards a smooth upgrade process, where users must be unaware you have just upgraded your deployment, except that there might be additional features available and improved stability and performance of some existing features. We don't ever want to remove features our users rely on. Sometimes we need to migrate users to a new implementation of that feature, which may require extra steps by the deployer, but the end users must be unaffected by such changes. However there are times when some features become a problem to maintain, and fall into disrepair. We aim to be honest with our users and highlight the issues we have, so we are in a position to find help to fix that situation.
Ideally we are able to rework such a feature so it can be maintained, but in some rare cases, where the feature no longer works, is not tested, and no one is stepping forward to maintain it, the best option can be to remove that feature. When we remove features, we need to warn users by first marking those features as deprecated before we finally remove them. The idea is to get feedback on how important the feature is to our user base. Where a feature is important, we work with the whole community to find a path forward for those users. API Scope ---------- Nova aims to provide a highly interoperable and stable REST API for our users to get self-service access to compute resources. No more API Proxies ******************** The Nova API currently has some APIs that are now (as of Kilo) mostly just a proxy to other OpenStack services. If it were possible to remove a public API, these are some we might start with. As such, we don't want to add any more. The first example is the API that is a proxy to the Glance v1 API. As Glance moves to deprecate its v1 API, we need to translate calls from the old v1 API we expose to Glance's v2 API. The next example is the networking APIs, in particular the security groups API. If you are using nova-network, Nova is still the only way to perform these network operations. But if you use Neutron, security groups have a much richer API in Neutron, and if you use both the Nova API and the Neutron API, the mismatch can lead to some very unexpected results in certain cases (a brief illustrative sketch appears below). Our intention is to avoid adding to the problems we already have in this area. No more Orchestration ********************** Nova is a low level infrastructure API. It is plumbing upon which richer ideas can be built; Heat and Magnum are great examples of that. While we have some APIs that could be considered orchestration, and we must continue to maintain those, we do not intend to add any more APIs that do orchestration. Third Party APIs ***************** Nova aims to focus on making a great API that is highly interoperable across all Nova deployments. We have historically done a very poor job of implementing and maintaining compatibility with third party APIs inside the Nova tree. As such, all new efforts should instead focus on external projects that provide third party compatibility on top of the Nova API. Where needed, we will work with those projects to extend the Nova API such that it is possible to add that functionality on top of the Nova API. However, we do not intend to add API calls for those services to persist third party API specific information in the Nova database. Instead we want to focus on additions that enhance the existing Nova API. Scalability ------------ Our mission includes the text "massively scalable". Let's discuss what that means. Nova has three main axes of scale: the number of API requests, the number of compute nodes, and the number of active instances. In many cases the number of compute nodes and the number of active instances are so closely related that you rarely need to consider them separately. There are other items, such as the number of tenants and the number of instances per tenant, but these are very rarely the key scale issue. It's possible to have a small cloud with lots of requests for very short lived VMs, or a large cloud with lots of longer lived VMs. These deployments need to scale out different components of the Nova system to reach their required level of scale.
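To make the security group mismatch described under "No more API Proxies" above a little more concrete, the sketch below lists security groups once through Nova's proxy API and once through Neutron's native API. It assumes you already hold a valid Keystone token and know the two service endpoints from the service catalog; the token and endpoint values shown are purely illustrative placeholders.

::

    import requests

    TOKEN = 'REPLACE_WITH_A_VALID_TOKEN'           # illustrative placeholder
    NOVA = 'http://cloud.example.com:8774/v2.1'    # compute endpoint from the catalog
    NEUTRON = 'http://cloud.example.com:9696'      # network endpoint from the catalog
    HEADERS = {'X-Auth-Token': TOKEN}

    # The Nova proxy view of security groups.
    nova_view = requests.get(NOVA + '/os-security-groups',
                             headers=HEADERS).json()['security_groups']

    # The native, richer Neutron view of (nominally) the same groups.
    neutron_view = requests.get(NEUTRON + '/v2.0/security-groups',
                                headers=HEADERS).json()['security_groups']

    # Neutron's objects carry fields and rule semantics that the Nova proxy
    # cannot express, which is where the mismatch described above comes from.
    print(len(nova_view), len(neutron_view))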
Ideally, all Nova components are either scaled out to match the number of API requests and build requests, or scaled out to match the number of running servers. If we create components whose load increases relative to both of these items, we can run into inefficiencies or resource contention. Although it is possible to make that work in some cases, this should always be considered. We intend Nova to be usable for both small and massive deployments, where small involves 1-10 hypervisors and massive means single regions with more than 10,000 hypervisors. That should be seen as our current goal, not an upper limit. There are some features that would not scale well for either the small scale or the very large scale. Ideally we would not accept these features, but if there is a strong case to add such a feature, we must work hard to ensure you can still run without it at the scale you are required to run. IaaS not Batch Processing -------------------------- Currently Nova focuses on providing on-demand compute resources in the style of classic Infrastructure-as-a-Service clouds: a large pool of compute resources that people can consume in a self-service way. Nova is not currently optimized for dealing with a number of requests for compute resources that is larger than the amount of compute resources currently available. We generally assume that a level of spare capacity is maintained for future requests; this is needed for users who want to quickly scale out, and extra capacity becomes available again as users scale in. While spare capacity is not strictly required, we are not optimizing for a system that aims to run at 100% capacity at all times. As such, our quota system is more focused on limiting the current level of resource usage than on ensuring a fair balance of resources between all incoming requests. This doesn't exclude adding features to support making better use of spare capacity, such as "spot instances". There have been discussions around how to change Nova to work better for batch job processing, but the current focus is on how to layer such an abstraction on top of the basic primitives Nova currently provides, possibly adding additional APIs where that makes good sense. Should this turn out to be impractical, we may have to revise our approach. Deployment and Packaging ------------------------- Nova does not plan on creating its own packaging or deployment systems. Our CI infrastructure is powered by DevStack, which can also be used by developers to test their work on a full deployment of Nova. We do not develop any deployment or packaging for production deployments. Being widely adopted by many distributions and commercial products, we instead choose to work with all those parties to ensure they are able to effectively package and deploy Nova. nova-13.1.4/PKG-INFO0000664000567000056710000000632113064447471015015 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: nova Version: 13.1.4 Summary: Cloud computing fabric controller Home-page: http://docs.openstack.org/developer/nova/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: OpenStack Nova README ===================== OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs.
OpenStack Nova is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Nova primarily consists of a set of Python daemons, though it requires and integrates with a number of native system components for databases, messaging and virtualization capabilities. To keep up to date with new developments in the OpenStack project, follow @openstack on Twitter. To learn how to deploy OpenStack Nova, consult the documentation available online at: http://docs.openstack.org For information about the different compute (hypervisor) drivers supported by Nova, read this page on the wiki: https://wiki.openstack.org/wiki/HypervisorSupportMatrix In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. If you obtained the software from a 3rd party operating system vendor, it is often wise to use their own bug tracker for reporting problems. In all other cases use the master OpenStack bug tracker, available at: http://bugs.launchpad.net/nova Developers wishing to work on the OpenStack Nova project should always base their work on the latest Nova code, available from the master Git repository at: https://git.openstack.org/cgit/openstack/nova Developers should also join the discussion on the mailing list, at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. Further developer focused documentation is available at: http://docs.openstack.org/developer/nova/ For information on how to contribute to Nova, please see the contents of the CONTRIBUTING.rst file. -- End of broadcast Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 nova-13.1.4/MAINTAINERS0000664000567000056710000000133713064447140015410 0ustar jenkinsjenkins00000000000000Nova doesn't have maintainers in the same way as the Linux kernel does. However, we do have sub-teams who maintain parts of Nova and a series of nominated "czars" to deal with cross-functional tasks. Each of these sub-teams and roles is documented on our wiki at https://wiki.openstack.org/wiki/Nova You can find helpful contacts for many parts of our code repository at https://wiki.openstack.org/wiki/Nova#Developer_Contacts We also have a page which documents tips and mentoring opportunities for new Nova developers at https://wiki.openstack.org/wiki/Nova/Mentoring Finally, you should also check out our developer reference at http://docs.openstack.org/developer/nova/devref/ Thanks for your interest in Nova, please come again!
nova-13.1.4/babel.cfg0000664000567000056710000000002113064447140015426 0ustar jenkinsjenkins00000000000000[python: **.py] nova-13.1.4/nova.egg-info/0000775000567000056710000000000013064447471016353 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova.egg-info/SOURCES.txt0000664000567000056710000044052413064447471020250 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst babel.cfg bandit.yaml openstack-common.conf requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tests-py3.txt tox.ini api-guide/source/authentication.rst api-guide/source/conf.py api-guide/source/extensions.rst api-guide/source/extra_specs_and_properties.rst api-guide/source/faults.rst api-guide/source/general_info.rst api-guide/source/index.rst api-guide/source/limits.rst api-guide/source/links_and_references.rst api-guide/source/microversions.rst api-guide/source/paginated_collections.rst api-guide/source/polling_changes-since_parameter.rst api-guide/source/request_and_response_formats.rst api-guide/source/server_concepts.rst api-guide/source/users.rst api-guide/source/versions.rst contrib/profile_caching_scheduler.sh contrib/xen/vif-openstack devstack/tempest-dsvm-cells-rc devstack/tempest-dsvm-lxc-rc doc/README.rst doc/api_samples/all_extensions/extensions-list-resp-v2.json doc/api_samples/all_extensions/extensions-list-resp-v21-compatible.json doc/api_samples/all_extensions/extensions-list-resp.json doc/api_samples/all_extensions/flavor-get-resp.json doc/api_samples/all_extensions/flavors-detail-resp.json doc/api_samples/all_extensions/flavors-list-resp.json doc/api_samples/all_extensions/server-action-confirm-resize.json doc/api_samples/all_extensions/server-action-create-image.json doc/api_samples/all_extensions/server-action-reboot.json doc/api_samples/all_extensions/server-action-rebuild-resp.json doc/api_samples/all_extensions/server-action-rebuild.json doc/api_samples/all_extensions/server-action-resize.json doc/api_samples/all_extensions/server-action-revert-resize.json doc/api_samples/all_extensions/server-create-req.json doc/api_samples/all_extensions/server-create-resp.json doc/api_samples/all_extensions/server-get-resp.json doc/api_samples/all_extensions/servers-details-resp.json doc/api_samples/all_extensions/servers-list-resp.json doc/api_samples/consoles/consoles-get-resp.json doc/api_samples/consoles/consoles-list-get-resp.json doc/api_samples/extension-info/extensions-get-resp-v2.json doc/api_samples/extension-info/extensions-get-resp.json doc/api_samples/flavor-access/flavor-access-add-tenant-req.json doc/api_samples/flavor-access/flavor-access-add-tenant-resp.json doc/api_samples/flavor-access/flavor-access-create-req.json doc/api_samples/flavor-access/flavor-access-create-resp.json doc/api_samples/flavor-access/flavor-access-detail-resp.json doc/api_samples/flavor-access/flavor-access-list-resp.json doc/api_samples/flavor-access/flavor-access-remove-tenant-req.json doc/api_samples/flavor-access/flavor-access-remove-tenant-resp.json doc/api_samples/flavor-access/flavor-access-show-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json 
doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json doc/api_samples/flavor-manage/flavor-create-post-req.json doc/api_samples/flavor-manage/flavor-create-post-resp.json doc/api_samples/flavors/flavor-get-resp.json doc/api_samples/flavors/flavors-detail-resp.json doc/api_samples/flavors/flavors-list-resp.json doc/api_samples/images/image-get-resp.json doc/api_samples/images/image-meta-key-get.json doc/api_samples/images/image-meta-key-put-req.json doc/api_samples/images/image-meta-key-put-resp.json doc/api_samples/images/image-metadata-get-resp.json doc/api_samples/images/image-metadata-post-req.json doc/api_samples/images/image-metadata-post-resp.json doc/api_samples/images/image-metadata-put-req.json doc/api_samples/images/image-metadata-put-resp.json doc/api_samples/images/images-details-get-resp.json doc/api_samples/images/images-list-get-resp.json doc/api_samples/keypairs/keypairs-get-resp.json doc/api_samples/keypairs/keypairs-import-post-req.json doc/api_samples/keypairs/keypairs-import-post-resp.json doc/api_samples/keypairs/keypairs-list-resp.json doc/api_samples/keypairs/keypairs-post-req.json doc/api_samples/keypairs/keypairs-post-resp.json doc/api_samples/keypairs/v2.10/keypairs-get-resp.json doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json doc/api_samples/keypairs/v2.10/keypairs-list-resp.json doc/api_samples/keypairs/v2.10/keypairs-post-req.json doc/api_samples/keypairs/v2.10/keypairs-post-resp.json doc/api_samples/keypairs/v2.2/keypairs-get-resp.json doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json doc/api_samples/keypairs/v2.2/keypairs-list-resp.json doc/api_samples/keypairs/v2.2/keypairs-post-req.json doc/api_samples/keypairs/v2.2/keypairs-post-resp.json doc/api_samples/limits/limit-get-resp.json doc/api_samples/limits/v2-limit-get-resp.json doc/api_samples/os-access-ips/server-action-rebuild-resp.json doc/api_samples/os-access-ips/server-action-rebuild.json doc/api_samples/os-access-ips/server-get-resp.json doc/api_samples/os-access-ips/server-post-req.json doc/api_samples/os-access-ips/server-post-resp.json doc/api_samples/os-access-ips/servers-details-resp.json doc/api_samples/os-admin-actions/admin-actions-inject-network-info.json doc/api_samples/os-admin-actions/admin-actions-reset-network.json doc/api_samples/os-admin-actions/admin-actions-reset-server-state.json doc/api_samples/os-admin-password/admin-password-change-password.json doc/api_samples/os-agents/agent-post-req.json doc/api_samples/os-agents/agent-post-resp.json doc/api_samples/os-agents/agent-update-put-req.json doc/api_samples/os-agents/agent-update-put-resp.json doc/api_samples/os-agents/agents-get-resp.json doc/api_samples/os-aggregates/aggregate-add-host-post-req.json doc/api_samples/os-aggregates/aggregate-metadata-post-req.json doc/api_samples/os-aggregates/aggregate-post-req.json doc/api_samples/os-aggregates/aggregate-post-resp.json doc/api_samples/os-aggregates/aggregate-remove-host-post-req.json doc/api_samples/os-aggregates/aggregate-update-post-req.json doc/api_samples/os-aggregates/aggregate-update-post-resp.json doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json doc/api_samples/os-aggregates/aggregates-get-resp.json doc/api_samples/os-aggregates/aggregates-list-get-resp.json doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json 
doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json doc/api_samples/os-availability-zone/availability-zone-detail-resp.json doc/api_samples/os-availability-zone/availability-zone-list-resp.json doc/api_samples/os-availability-zone/availability-zone-post-req.json doc/api_samples/os-availability-zone/availability-zone-post-resp.json doc/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json doc/api_samples/os-cells/cells-capacities-resp.json doc/api_samples/os-cells/cells-get-resp.json doc/api_samples/os-cells/cells-list-empty-resp.json doc/api_samples/os-cells/cells-list-resp.json doc/api_samples/os-certificates/certificate-create-resp.json doc/api_samples/os-certificates/certificate-get-root-resp.json doc/api_samples/os-cloudpipe/cloud-pipe-create-req.json doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json doc/api_samples/os-cloudpipe/cloud-pipe-update-req.json doc/api_samples/os-config-drive/server-config-drive-get-resp.json doc/api_samples/os-config-drive/server-post-req.json doc/api_samples/os-config-drive/server-post-resp.json doc/api_samples/os-config-drive/servers-config-drive-details-resp.json doc/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json doc/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json doc/api_samples/os-console-output/console-output-post-req.json doc/api_samples/os-console-output/console-output-post-resp.json doc/api_samples/os-create-backup/create-backup-req.json doc/api_samples/os-deferred-delete/force-delete-post-req.json doc/api_samples/os-deferred-delete/restore-post-req.json doc/api_samples/os-disk-config/list-servers-detail-get.json doc/api_samples/os-disk-config/server-action-rebuild-req.json doc/api_samples/os-disk-config/server-action-rebuild-resp.json doc/api_samples/os-disk-config/server-get-resp.json doc/api_samples/os-disk-config/server-post-req.json doc/api_samples/os-disk-config/server-post-resp.json doc/api_samples/os-disk-config/server-resize-post-req.json doc/api_samples/os-evacuate/server-evacuate-find-host-req.json doc/api_samples/os-evacuate/server-evacuate-find-host-resp.json doc/api_samples/os-evacuate/server-evacuate-req.json doc/api_samples/os-evacuate/server-evacuate-resp.json doc/api_samples/os-evacuate/v2.14/server-evacuate-find-host-req.json doc/api_samples/os-evacuate/v2.14/server-evacuate-req.json doc/api_samples/os-extended-availability-zone/server-get-resp.json doc/api_samples/os-extended-availability-zone/servers-detail-resp.json doc/api_samples/os-extended-server-attributes/server-get-resp.json doc/api_samples/os-extended-server-attributes/server-post-resp.json doc/api_samples/os-extended-server-attributes/servers-detail-resp.json doc/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json doc/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json doc/api_samples/os-extended-server-attributes/v2.3/server-get-resp.json doc/api_samples/os-extended-server-attributes/v2.3/servers-detail-resp.json doc/api_samples/os-extended-status/server-get-resp.json 
doc/api_samples/os-extended-status/servers-detail-resp.json doc/api_samples/os-extended-volumes/server-get-resp.json doc/api_samples/os-extended-volumes/servers-detail-resp.json doc/api_samples/os-extended-volumes/v2.3/server-get-resp.json doc/api_samples/os-extended-volumes/v2.3/servers-detail-resp.json doc/api_samples/os-fixed-ips/fixedip-post-req.json doc/api_samples/os-fixed-ips/fixedips-get-resp.json doc/api_samples/os-fixed-ips/v2.4/fixedip-post-req.json doc/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json doc/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json doc/api_samples/os-floating-ips/floating-ips-create-req.json doc/api_samples/os-floating-ips/floating-ips-create-resp.json doc/api_samples/os-floating-ips/floating-ips-get-resp.json doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json doc/api_samples/os-floating-ips/floating-ips-list-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json doc/api_samples/os-fping/fping-get-details-resp.json doc/api_samples/os-fping/fping-get-resp.json doc/api_samples/os-hide-server-addresses/server-get-resp.json doc/api_samples/os-hide-server-addresses/servers-details-resp.json doc/api_samples/os-hide-server-addresses/servers-list-resp.json doc/api_samples/os-hosts/host-get-reboot.json doc/api_samples/os-hosts/host-get-resp.json doc/api_samples/os-hosts/host-get-shutdown.json doc/api_samples/os-hosts/host-get-startup.json doc/api_samples/os-hosts/host-put-maintenance-req.json doc/api_samples/os-hosts/host-put-maintenance-resp.json doc/api_samples/os-hosts/hosts-list-resp.json doc/api_samples/os-hypervisors/hypervisors-detail-resp.json doc/api_samples/os-hypervisors/hypervisors-list-resp.json doc/api_samples/os-hypervisors/hypervisors-search-resp.json doc/api_samples/os-hypervisors/hypervisors-show-resp.json doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json doc/api_samples/os-hypervisors/hypervisors-with-servers-resp.json doc/api_samples/os-hypervisors/hypervisors-without-servers-resp.json doc/api_samples/os-instance-actions/instance-action-get-resp.json doc/api_samples/os-instance-actions/instance-actions-list-resp.json doc/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json 
doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json doc/api_samples/os-lock-server/lock-server.json doc/api_samples/os-lock-server/unlock-server.json doc/api_samples/os-migrate-server/live-migrate-server.json doc/api_samples/os-migrate-server/migrate-server.json doc/api_samples/os-migrate-server/v2.25/live-migrate-server.json doc/api_samples/os-migrations/migrations-get.json doc/api_samples/os-migrations/v2.23/migrations-get.json doc/api_samples/os-multinic/multinic-add-fixed-ip-req.json doc/api_samples/os-multinic/multinic-remove-fixed-ip-req.json doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json doc/api_samples/os-multiple-create/multiple-create-post-req.json doc/api_samples/os-multiple-create/multiple-create-post-resp.json doc/api_samples/os-networks/network-add-req.json doc/api_samples/os-networks/network-create-req.json doc/api_samples/os-networks/network-create-resp.json doc/api_samples/os-networks/network-show-resp.json doc/api_samples/os-networks/networks-disassociate-req.json doc/api_samples/os-networks/networks-list-resp.json doc/api_samples/os-networks-associate/network-associate-host-req.json doc/api_samples/os-networks-associate/network-disassociate-host-req.json doc/api_samples/os-networks-associate/network-disassociate-project-req.json doc/api_samples/os-networks-associate/network-disassociate-req.json doc/api_samples/os-pause-server/pause-server.json doc/api_samples/os-pause-server/unpause-server.json doc/api_samples/os-pci/hypervisors-pci-detail-resp.json doc/api_samples/os-pci/hypervisors-pci-show-resp.json doc/api_samples/os-pci/pci-detail-resp.json doc/api_samples/os-pci/pci-index-resp.json doc/api_samples/os-pci/pci-show-resp.json doc/api_samples/os-pci/server-get-resp.json doc/api_samples/os-pci/servers-detail-resp.json doc/api_samples/os-personality/server-action-rebuild-req.json doc/api_samples/os-personality/server-action-rebuild-resp.json doc/api_samples/os-personality/server-post-req.json doc/api_samples/os-personality/server-post-resp.json doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.json doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json doc/api_samples/os-quota-sets/quotas-show-get-resp.json doc/api_samples/os-quota-sets/quotas-update-force-post-req.json doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json doc/api_samples/os-quota-sets/quotas-update-post-req.json doc/api_samples/os-quota-sets/quotas-update-post-resp.json doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json doc/api_samples/os-quota-sets/user-quotas-update-post-req.json doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json doc/api_samples/os-remote-consoles/get-rdp-console-post-req.json doc/api_samples/os-remote-consoles/get-rdp-console-post-resp.json doc/api_samples/os-remote-consoles/get-serial-console-post-req.json doc/api_samples/os-remote-consoles/get-serial-console-post-resp.json doc/api_samples/os-remote-consoles/get-spice-console-post-req.json 
doc/api_samples/os-remote-consoles/get-spice-console-post-resp.json doc/api_samples/os-remote-consoles/get-vnc-console-post-req.json doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json doc/api_samples/os-remote-consoles/v2.8/create-mks-console-req.json doc/api_samples/os-remote-consoles/v2.8/create-mks-console-resp.json doc/api_samples/os-rescue/server-get-resp-rescue.json doc/api_samples/os-rescue/server-get-resp-unrescue.json doc/api_samples/os-rescue/server-rescue-req-with-image-ref.json doc/api_samples/os-rescue/server-rescue-req.json doc/api_samples/os-rescue/server-rescue.json doc/api_samples/os-rescue/server-unrescue-req.json doc/api_samples/os-scheduler-hints/scheduler-hints-post-req.json doc/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json doc/api_samples/os-security-groups/security-group-add-post-req.json doc/api_samples/os-security-groups/security-group-post-req.json doc/api_samples/os-security-groups/security-group-remove-post-req.json doc/api_samples/os-security-groups/security-groups-create-resp.json doc/api_samples/os-security-groups/security-groups-get-resp.json doc/api_samples/os-security-groups/security-groups-list-get-resp.json doc/api_samples/os-security-groups/server-get-resp.json doc/api_samples/os-security-groups/server-post-req.json doc/api_samples/os-security-groups/server-post-resp.json doc/api_samples/os-security-groups/server-security-groups-list-resp.json doc/api_samples/os-security-groups/servers-detail-resp.json doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json doc/api_samples/os-server-external-events/event-create-req.json doc/api_samples/os-server-external-events/event-create-resp.json doc/api_samples/os-server-groups/server-groups-get-resp.json doc/api_samples/os-server-groups/server-groups-list-resp.json doc/api_samples/os-server-groups/server-groups-post-req.json doc/api_samples/os-server-groups/server-groups-post-resp.json doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json doc/api_samples/os-server-groups/v2.13/server-groups-post-req.json doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json doc/api_samples/os-server-password/get-password-resp.json doc/api_samples/os-server-usage/server-get-resp.json doc/api_samples/os-server-usage/servers-detail-resp.json doc/api_samples/os-services/service-disable-log-put-req.json doc/api_samples/os-services/service-disable-log-put-resp.json doc/api_samples/os-services/service-disable-put-req.json doc/api_samples/os-services/service-disable-put-resp.json doc/api_samples/os-services/service-enable-put-req.json doc/api_samples/os-services/service-enable-put-resp.json doc/api_samples/os-services/services-list-get-resp.json doc/api_samples/os-services/v2.11/service-disable-log-put-req.json doc/api_samples/os-services/v2.11/service-disable-log-put-resp.json doc/api_samples/os-services/v2.11/service-disable-put-req.json doc/api_samples/os-services/v2.11/service-disable-put-resp.json 
doc/api_samples/os-services/v2.11/service-enable-put-req.json doc/api_samples/os-services/v2.11/service-enable-put-resp.json doc/api_samples/os-services/v2.11/service-force-down-put-req.json doc/api_samples/os-services/v2.11/service-force-down-put-resp.json doc/api_samples/os-services/v2.11/services-list-get-resp.json doc/api_samples/os-shelve/os-shelve-offload.json doc/api_samples/os-shelve/os-shelve.json doc/api_samples/os-shelve/os-unshelve.json doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json doc/api_samples/os-suspend-server/server-resume.json doc/api_samples/os-suspend-server/server-suspend.json doc/api_samples/os-tenant-networks/networks-list-res.json doc/api_samples/os-tenant-networks/networks-post-req.json doc/api_samples/os-tenant-networks/networks-post-res.json doc/api_samples/os-used-limits/usedlimits-get-resp.json doc/api_samples/os-used-limits/v2-usedlimits-get-resp.json doc/api_samples/os-user-data/userdata-post-req.json doc/api_samples/os-user-data/userdata-post-resp.json doc/api_samples/os-virtual-interfaces/vifs-list-resp-v2.json doc/api_samples/os-virtual-interfaces/vifs-list-resp.json doc/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json doc/api_samples/os-volumes/attach-volume-to-server-req.json doc/api_samples/os-volumes/attach-volume-to-server-resp.json doc/api_samples/os-volumes/list-volume-attachments-resp.json doc/api_samples/os-volumes/os-volumes-detail-resp.json doc/api_samples/os-volumes/os-volumes-get-resp.json doc/api_samples/os-volumes/os-volumes-index-resp.json doc/api_samples/os-volumes/os-volumes-post-req.json doc/api_samples/os-volumes/os-volumes-post-resp.json doc/api_samples/os-volumes/snapshot-create-req.json doc/api_samples/os-volumes/snapshot-create-resp.json doc/api_samples/os-volumes/snapshots-detail-resp.json doc/api_samples/os-volumes/snapshots-list-resp.json doc/api_samples/os-volumes/snapshots-show-resp.json doc/api_samples/os-volumes/update-volume-req.json doc/api_samples/os-volumes/volume-attachment-detail-resp.json doc/api_samples/server-ips/server-ips-network-resp.json doc/api_samples/server-ips/server-ips-resp.json doc/api_samples/server-metadata/server-metadata-all-req.json doc/api_samples/server-metadata/server-metadata-all-resp.json doc/api_samples/server-metadata/server-metadata-req.json doc/api_samples/server-metadata/server-metadata-resp.json doc/api_samples/server-migrations/force_complete.json doc/api_samples/server-migrations/live-migrate-server.json doc/api_samples/server-migrations/v2.23/migrations-get.json doc/api_samples/server-migrations/v2.23/migrations-index.json doc/api_samples/servers/server-action-confirm-resize.json doc/api_samples/servers/server-action-create-image.json doc/api_samples/servers/server-action-reboot.json doc/api_samples/servers/server-action-rebuild-preserve-ephemeral.json doc/api_samples/servers/server-action-rebuild-resp.json doc/api_samples/servers/server-action-rebuild.json doc/api_samples/servers/server-action-resize.json doc/api_samples/servers/server-action-revert-resize.json doc/api_samples/servers/server-action-start.json doc/api_samples/servers/server-action-stop.json doc/api_samples/servers/server-create-req.json doc/api_samples/servers/server-create-resp.json doc/api_samples/servers/server-get-resp.json doc/api_samples/servers/server-post-req.json doc/api_samples/servers/server-post-resp.json doc/api_samples/servers/server-update-req.json doc/api_samples/servers/server-update-resp.json 
doc/api_samples/servers/servers-details-resp.json doc/api_samples/servers/servers-list-resp.json doc/api_samples/servers-sort/server-sort-keys-list-resp.json doc/api_samples/servers/v2.16/server-get-resp.json doc/api_samples/servers/v2.16/servers-details-resp.json doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json doc/api_samples/servers/v2.19/server-action-rebuild-resp.json doc/api_samples/servers/v2.19/server-action-rebuild.json doc/api_samples/servers/v2.19/server-get-resp.json doc/api_samples/servers/v2.19/server-post-req.json doc/api_samples/servers/v2.19/server-post-resp.json doc/api_samples/servers/v2.19/server-put-req.json doc/api_samples/servers/v2.19/server-put-resp.json doc/api_samples/servers/v2.19/servers-details-resp.json doc/api_samples/servers/v2.19/servers-list-resp.json doc/api_samples/servers/v2.9/server-get-resp.json doc/api_samples/servers/v2.9/servers-details-resp.json doc/api_samples/servers/v2.9/servers-list-resp.json doc/api_samples/versions/v2-version-get-resp.json doc/api_samples/versions/v21-version-get-resp.json doc/api_samples/versions/versions-get-resp.json doc/ext/__init__.py doc/ext/nova_todo.py doc/ext/support_matrix.py doc/ext/versioned_notifications.py doc/notification_samples/service-update.json doc/source/addmethod.openstackapi.rst doc/source/aggregates.rst doc/source/api_microversion_dev.rst doc/source/api_microversion_history.rst doc/source/api_plugins.rst doc/source/architecture.rst doc/source/block_device_mapping.rst doc/source/blueprints.rst doc/source/cells.rst doc/source/code-review.rst doc/source/conductor.rst doc/source/conf.py doc/source/development.environment.rst doc/source/feature_classification.rst doc/source/filter_scheduler.rst doc/source/gmr.rst doc/source/how_to_get_involved.rst doc/source/i18n.rst doc/source/index.rst doc/source/notifications.rst doc/source/policies.rst doc/source/policy_enforcement.rst doc/source/process.rst doc/source/project_scope.rst doc/source/rpc.rst doc/source/sample_config.rst doc/source/scheduler_evolution.rst doc/source/services.rst doc/source/stable_api.rst doc/source/support-matrix.ini doc/source/support-matrix.rst doc/source/test_strategy.rst doc/source/threading.rst doc/source/upgrade.rst doc/source/vmstates.rst doc/source/_ga/layout.html doc/source/_static/support-matrix.css doc/source/image_src/Nova_spec_process.graphml doc/source/image_src/PowerStates.odp doc/source/image_src/architecture.dia doc/source/image_src/create_vm_states.diag doc/source/images/Nova_spec_process.svg doc/source/images/PowerStates1.png doc/source/images/PowerStates2.png doc/source/images/architecture.svg doc/source/images/create_vm_states.svg doc/source/images/filteringWorkflow1.png doc/source/images/filteringWorkflow2.png doc/source/images/run_instance_walkthrough.png doc/source/images/rpc/arch.png doc/source/images/rpc/arch.svg doc/source/images/rpc/flow1.png doc/source/images/rpc/flow1.svg doc/source/images/rpc/flow2.png doc/source/images/rpc/flow2.svg doc/source/images/rpc/rabt.png doc/source/images/rpc/rabt.svg doc/source/images/rpc/state.png doc/source/man/index.rst doc/source/man/nova-all.rst doc/source/man/nova-api-metadata.rst doc/source/man/nova-api-os-compute.rst doc/source/man/nova-api.rst doc/source/man/nova-cells.rst doc/source/man/nova-cert.rst doc/source/man/nova-compute.rst doc/source/man/nova-conductor.rst doc/source/man/nova-console.rst doc/source/man/nova-consoleauth.rst doc/source/man/nova-dhcpbridge.rst doc/source/man/nova-idmapshift.rst doc/source/man/nova-manage.rst 
doc/source/man/nova-network.rst doc/source/man/nova-novncproxy.rst doc/source/man/nova-rootwrap.rst doc/source/man/nova-scheduler.rst doc/source/man/nova-serialproxy.rst doc/source/man/nova-spicehtml5proxy.rst doc/source/man/nova-xvpvncproxy.rst doc/source/testing/libvirt-numa.rst doc/source/testing/serial-console.rst etc/nova/README-nova.conf.txt etc/nova/api-paste.ini etc/nova/cells.json etc/nova/logging_sample.conf etc/nova/nova-config-generator.conf etc/nova/policy.json etc/nova/release.sample etc/nova/rootwrap.conf etc/nova/rootwrap.d/api-metadata.filters etc/nova/rootwrap.d/compute.filters etc/nova/rootwrap.d/network.filters nova/__init__.py nova/availability_zones.py nova/baserpc.py nova/block_device.py nova/cache_utils.py nova/config.py nova/context.py nova/crypto.py nova/debugger.py nova/exception.py nova/filters.py nova/hooks.py nova/i18n.py nova/loadables.py nova/manager.py nova/netconf.py nova/notifications.py nova/opts.py nova/paths.py nova/policy.py nova/quota.py nova/rpc.py nova/safe_utils.py nova/service.py nova/signature_utils.py nova/test.py nova/utils.py nova/version.py nova/weights.py nova/wsgi.py nova.egg-info/PKG-INFO nova.egg-info/SOURCES.txt nova.egg-info/dependency_links.txt nova.egg-info/entry_points.txt nova.egg-info/not-zip-safe nova.egg-info/pbr.json nova.egg-info/requires.txt nova.egg-info/top_level.txt nova/CA/.gitignore nova/CA/geninter.sh nova/CA/genrootca.sh nova/CA/openssl.cnf.tmpl nova/CA/newcerts/.placeholder nova/CA/private/.placeholder nova/CA/projects/.gitignore nova/CA/projects/.placeholder nova/CA/reqs/.gitignore nova/CA/reqs/.placeholder nova/api/__init__.py nova/api/auth.py nova/api/compute_req_id.py nova/api/manager.py nova/api/opts.py nova/api/sizelimit.py nova/api/validator.py nova/api/ec2/__init__.py nova/api/ec2/cloud.py nova/api/ec2/ec2utils.py nova/api/metadata/__init__.py nova/api/metadata/base.py nova/api/metadata/handler.py nova/api/metadata/password.py nova/api/metadata/vendordata_json.py nova/api/openstack/__init__.py nova/api/openstack/api_version_request.py nova/api/openstack/auth.py nova/api/openstack/common.py nova/api/openstack/extensions.py nova/api/openstack/rest_api_version_history.rst nova/api/openstack/urlmap.py nova/api/openstack/versioned_method.py nova/api/openstack/wsgi.py nova/api/openstack/compute/__init__.py nova/api/openstack/compute/access_ips.py nova/api/openstack/compute/admin_actions.py nova/api/openstack/compute/admin_password.py nova/api/openstack/compute/agents.py nova/api/openstack/compute/aggregates.py nova/api/openstack/compute/assisted_volume_snapshots.py nova/api/openstack/compute/attach_interfaces.py nova/api/openstack/compute/availability_zone.py nova/api/openstack/compute/baremetal_nodes.py nova/api/openstack/compute/block_device_mapping.py nova/api/openstack/compute/block_device_mapping_v1.py nova/api/openstack/compute/cells.py nova/api/openstack/compute/certificates.py nova/api/openstack/compute/cloudpipe.py nova/api/openstack/compute/config_drive.py nova/api/openstack/compute/console_auth_tokens.py nova/api/openstack/compute/console_output.py nova/api/openstack/compute/consoles.py nova/api/openstack/compute/create_backup.py nova/api/openstack/compute/deferred_delete.py nova/api/openstack/compute/disk_config.py nova/api/openstack/compute/evacuate.py nova/api/openstack/compute/extended_availability_zone.py nova/api/openstack/compute/extended_server_attributes.py nova/api/openstack/compute/extended_status.py nova/api/openstack/compute/extended_volumes.py nova/api/openstack/compute/extension_info.py 
nova/api/openstack/compute/fixed_ips.py nova/api/openstack/compute/flavor_access.py nova/api/openstack/compute/flavor_manage.py nova/api/openstack/compute/flavor_rxtx.py nova/api/openstack/compute/flavors.py nova/api/openstack/compute/flavors_extraspecs.py nova/api/openstack/compute/floating_ip_dns.py nova/api/openstack/compute/floating_ip_pools.py nova/api/openstack/compute/floating_ips.py nova/api/openstack/compute/floating_ips_bulk.py nova/api/openstack/compute/fping.py nova/api/openstack/compute/hide_server_addresses.py nova/api/openstack/compute/hosts.py nova/api/openstack/compute/hypervisors.py nova/api/openstack/compute/image_metadata.py nova/api/openstack/compute/image_size.py nova/api/openstack/compute/images.py nova/api/openstack/compute/instance_actions.py nova/api/openstack/compute/instance_usage_audit_log.py nova/api/openstack/compute/ips.py nova/api/openstack/compute/keypairs.py nova/api/openstack/compute/limits.py nova/api/openstack/compute/lock_server.py nova/api/openstack/compute/migrate_server.py nova/api/openstack/compute/migrations.py nova/api/openstack/compute/multinic.py nova/api/openstack/compute/multiple_create.py nova/api/openstack/compute/networks.py nova/api/openstack/compute/networks_associate.py nova/api/openstack/compute/pause_server.py nova/api/openstack/compute/pci.py nova/api/openstack/compute/personality.py nova/api/openstack/compute/preserve_ephemeral_rebuild.py nova/api/openstack/compute/quota_classes.py nova/api/openstack/compute/quota_sets.py nova/api/openstack/compute/remote_consoles.py nova/api/openstack/compute/rescue.py nova/api/openstack/compute/scheduler_hints.py nova/api/openstack/compute/security_group_default_rules.py nova/api/openstack/compute/security_groups.py nova/api/openstack/compute/server_diagnostics.py nova/api/openstack/compute/server_external_events.py nova/api/openstack/compute/server_groups.py nova/api/openstack/compute/server_metadata.py nova/api/openstack/compute/server_migrations.py nova/api/openstack/compute/server_password.py nova/api/openstack/compute/server_usage.py nova/api/openstack/compute/servers.py nova/api/openstack/compute/services.py nova/api/openstack/compute/shelve.py nova/api/openstack/compute/simple_tenant_usage.py nova/api/openstack/compute/suspend_server.py nova/api/openstack/compute/tenant_networks.py nova/api/openstack/compute/used_limits.py nova/api/openstack/compute/user_data.py nova/api/openstack/compute/versions.py nova/api/openstack/compute/versionsV21.py nova/api/openstack/compute/virtual_interfaces.py nova/api/openstack/compute/volumes.py nova/api/openstack/compute/legacy_v2/__init__.py nova/api/openstack/compute/legacy_v2/consoles.py nova/api/openstack/compute/legacy_v2/extensions.py nova/api/openstack/compute/legacy_v2/flavors.py nova/api/openstack/compute/legacy_v2/image_metadata.py nova/api/openstack/compute/legacy_v2/images.py nova/api/openstack/compute/legacy_v2/ips.py nova/api/openstack/compute/legacy_v2/limits.py nova/api/openstack/compute/legacy_v2/server_metadata.py nova/api/openstack/compute/legacy_v2/servers.py nova/api/openstack/compute/legacy_v2/versions.py nova/api/openstack/compute/legacy_v2/contrib/__init__.py nova/api/openstack/compute/legacy_v2/contrib/admin_actions.py nova/api/openstack/compute/legacy_v2/contrib/agents.py nova/api/openstack/compute/legacy_v2/contrib/aggregates.py nova/api/openstack/compute/legacy_v2/contrib/assisted_volume_snapshots.py nova/api/openstack/compute/legacy_v2/contrib/attach_interfaces.py nova/api/openstack/compute/legacy_v2/contrib/availability_zone.py 
nova/api/openstack/compute/legacy_v2/contrib/baremetal_ext_status.py nova/api/openstack/compute/legacy_v2/contrib/baremetal_nodes.py nova/api/openstack/compute/legacy_v2/contrib/block_device_mapping_v2_boot.py nova/api/openstack/compute/legacy_v2/contrib/cell_capacities.py nova/api/openstack/compute/legacy_v2/contrib/cells.py nova/api/openstack/compute/legacy_v2/contrib/certificates.py nova/api/openstack/compute/legacy_v2/contrib/cloudpipe.py nova/api/openstack/compute/legacy_v2/contrib/cloudpipe_update.py nova/api/openstack/compute/legacy_v2/contrib/config_drive.py nova/api/openstack/compute/legacy_v2/contrib/console_auth_tokens.py nova/api/openstack/compute/legacy_v2/contrib/console_output.py nova/api/openstack/compute/legacy_v2/contrib/consoles.py nova/api/openstack/compute/legacy_v2/contrib/createserverext.py nova/api/openstack/compute/legacy_v2/contrib/deferred_delete.py nova/api/openstack/compute/legacy_v2/contrib/disk_config.py nova/api/openstack/compute/legacy_v2/contrib/evacuate.py nova/api/openstack/compute/legacy_v2/contrib/extended_availability_zone.py nova/api/openstack/compute/legacy_v2/contrib/extended_evacuate_find_host.py nova/api/openstack/compute/legacy_v2/contrib/extended_floating_ips.py nova/api/openstack/compute/legacy_v2/contrib/extended_hypervisors.py nova/api/openstack/compute/legacy_v2/contrib/extended_ips.py nova/api/openstack/compute/legacy_v2/contrib/extended_ips_mac.py nova/api/openstack/compute/legacy_v2/contrib/extended_networks.py nova/api/openstack/compute/legacy_v2/contrib/extended_quotas.py nova/api/openstack/compute/legacy_v2/contrib/extended_rescue_with_image.py nova/api/openstack/compute/legacy_v2/contrib/extended_server_attributes.py nova/api/openstack/compute/legacy_v2/contrib/extended_services.py nova/api/openstack/compute/legacy_v2/contrib/extended_services_delete.py nova/api/openstack/compute/legacy_v2/contrib/extended_status.py nova/api/openstack/compute/legacy_v2/contrib/extended_virtual_interfaces_net.py nova/api/openstack/compute/legacy_v2/contrib/extended_volumes.py nova/api/openstack/compute/legacy_v2/contrib/fixed_ips.py nova/api/openstack/compute/legacy_v2/contrib/flavor_access.py nova/api/openstack/compute/legacy_v2/contrib/flavor_disabled.py nova/api/openstack/compute/legacy_v2/contrib/flavor_rxtx.py nova/api/openstack/compute/legacy_v2/contrib/flavor_swap.py nova/api/openstack/compute/legacy_v2/contrib/flavorextradata.py nova/api/openstack/compute/legacy_v2/contrib/flavorextraspecs.py nova/api/openstack/compute/legacy_v2/contrib/flavormanage.py nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py nova/api/openstack/compute/legacy_v2/contrib/floating_ip_pools.py nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py nova/api/openstack/compute/legacy_v2/contrib/floating_ips_bulk.py nova/api/openstack/compute/legacy_v2/contrib/fping.py nova/api/openstack/compute/legacy_v2/contrib/hide_server_addresses.py nova/api/openstack/compute/legacy_v2/contrib/hosts.py nova/api/openstack/compute/legacy_v2/contrib/hypervisor_status.py nova/api/openstack/compute/legacy_v2/contrib/hypervisors.py nova/api/openstack/compute/legacy_v2/contrib/image_size.py nova/api/openstack/compute/legacy_v2/contrib/instance_actions.py nova/api/openstack/compute/legacy_v2/contrib/instance_usage_audit_log.py nova/api/openstack/compute/legacy_v2/contrib/keypairs.py nova/api/openstack/compute/legacy_v2/contrib/migrations.py nova/api/openstack/compute/legacy_v2/contrib/multinic.py nova/api/openstack/compute/legacy_v2/contrib/multiple_create.py 
nova/api/openstack/compute/legacy_v2/contrib/networks_associate.py nova/api/openstack/compute/legacy_v2/contrib/os_networks.py nova/api/openstack/compute/legacy_v2/contrib/os_tenant_networks.py nova/api/openstack/compute/legacy_v2/contrib/preserve_ephemeral_rebuild.py nova/api/openstack/compute/legacy_v2/contrib/quota_classes.py nova/api/openstack/compute/legacy_v2/contrib/quotas.py nova/api/openstack/compute/legacy_v2/contrib/rescue.py nova/api/openstack/compute/legacy_v2/contrib/scheduler_hints.py nova/api/openstack/compute/legacy_v2/contrib/security_group_default_rules.py nova/api/openstack/compute/legacy_v2/contrib/security_groups.py nova/api/openstack/compute/legacy_v2/contrib/server_diagnostics.py nova/api/openstack/compute/legacy_v2/contrib/server_external_events.py nova/api/openstack/compute/legacy_v2/contrib/server_group_quotas.py nova/api/openstack/compute/legacy_v2/contrib/server_groups.py nova/api/openstack/compute/legacy_v2/contrib/server_list_multi_status.py nova/api/openstack/compute/legacy_v2/contrib/server_password.py nova/api/openstack/compute/legacy_v2/contrib/server_sort_keys.py nova/api/openstack/compute/legacy_v2/contrib/server_start_stop.py nova/api/openstack/compute/legacy_v2/contrib/server_usage.py nova/api/openstack/compute/legacy_v2/contrib/services.py nova/api/openstack/compute/legacy_v2/contrib/shelve.py nova/api/openstack/compute/legacy_v2/contrib/simple_tenant_usage.py nova/api/openstack/compute/legacy_v2/contrib/used_limits.py nova/api/openstack/compute/legacy_v2/contrib/used_limits_for_admin.py nova/api/openstack/compute/legacy_v2/contrib/user_data.py nova/api/openstack/compute/legacy_v2/contrib/user_quotas.py nova/api/openstack/compute/legacy_v2/contrib/virtual_interfaces.py nova/api/openstack/compute/legacy_v2/contrib/volume_attachment_update.py nova/api/openstack/compute/legacy_v2/contrib/volumes.py nova/api/openstack/compute/schemas/__init__.py nova/api/openstack/compute/schemas/access_ips.py nova/api/openstack/compute/schemas/admin_password.py nova/api/openstack/compute/schemas/agents.py nova/api/openstack/compute/schemas/aggregates.py nova/api/openstack/compute/schemas/assisted_volume_snapshots.py nova/api/openstack/compute/schemas/attach_interfaces.py nova/api/openstack/compute/schemas/availability_zone.py nova/api/openstack/compute/schemas/block_device_mapping.py nova/api/openstack/compute/schemas/block_device_mapping_v1.py nova/api/openstack/compute/schemas/cells.py nova/api/openstack/compute/schemas/cloudpipe.py nova/api/openstack/compute/schemas/config_drive.py nova/api/openstack/compute/schemas/console_output.py nova/api/openstack/compute/schemas/create_backup.py nova/api/openstack/compute/schemas/disk_config.py nova/api/openstack/compute/schemas/evacuate.py nova/api/openstack/compute/schemas/fixed_ips.py nova/api/openstack/compute/schemas/flavor_access.py nova/api/openstack/compute/schemas/flavor_manage.py nova/api/openstack/compute/schemas/flavors_extraspecs.py nova/api/openstack/compute/schemas/floating_ip_dns.py nova/api/openstack/compute/schemas/floating_ips.py nova/api/openstack/compute/schemas/floating_ips_bulk.py nova/api/openstack/compute/schemas/hosts.py nova/api/openstack/compute/schemas/image_metadata.py nova/api/openstack/compute/schemas/keypairs.py nova/api/openstack/compute/schemas/migrate_server.py nova/api/openstack/compute/schemas/multinic.py nova/api/openstack/compute/schemas/multiple_create.py nova/api/openstack/compute/schemas/networks.py nova/api/openstack/compute/schemas/networks_associate.py 
nova/api/openstack/compute/schemas/personality.py nova/api/openstack/compute/schemas/preserve_ephemeral_rebuild.py nova/api/openstack/compute/schemas/quota_classes.py nova/api/openstack/compute/schemas/quota_sets.py nova/api/openstack/compute/schemas/remote_consoles.py nova/api/openstack/compute/schemas/rescue.py nova/api/openstack/compute/schemas/reset_server_state.py nova/api/openstack/compute/schemas/scheduler_hints.py nova/api/openstack/compute/schemas/security_groups.py nova/api/openstack/compute/schemas/server_external_events.py nova/api/openstack/compute/schemas/server_groups.py nova/api/openstack/compute/schemas/server_metadata.py nova/api/openstack/compute/schemas/server_migrations.py nova/api/openstack/compute/schemas/servers.py nova/api/openstack/compute/schemas/services.py nova/api/openstack/compute/schemas/tenant_networks.py nova/api/openstack/compute/schemas/user_data.py nova/api/openstack/compute/schemas/volumes.py nova/api/openstack/compute/views/__init__.py nova/api/openstack/compute/views/addresses.py nova/api/openstack/compute/views/flavors.py nova/api/openstack/compute/views/images.py nova/api/openstack/compute/views/limits.py nova/api/openstack/compute/views/servers.py nova/api/openstack/compute/views/versions.py nova/api/validation/__init__.py nova/api/validation/parameter_types.py nova/api/validation/validators.py nova/cells/__init__.py nova/cells/driver.py nova/cells/manager.py nova/cells/messaging.py nova/cells/opts.py nova/cells/rpc_driver.py nova/cells/rpcapi.py nova/cells/scheduler.py nova/cells/state.py nova/cells/utils.py nova/cells/filters/__init__.py nova/cells/filters/different_cell.py nova/cells/filters/image_properties.py nova/cells/filters/target_cell.py nova/cells/weights/__init__.py nova/cells/weights/mute_child.py nova/cells/weights/ram_by_instance_type.py nova/cells/weights/weight_offset.py nova/cert/__init__.py nova/cert/manager.py nova/cert/rpcapi.py nova/cloudpipe/__init__.py nova/cloudpipe/bootscript.template nova/cloudpipe/client.ovpn.template nova/cloudpipe/pipelib.py nova/cmd/__init__.py nova/cmd/all.py nova/cmd/api.py nova/cmd/api_metadata.py nova/cmd/api_os_compute.py nova/cmd/baseproxy.py nova/cmd/cells.py nova/cmd/cert.py nova/cmd/compute.py nova/cmd/conductor.py nova/cmd/console.py nova/cmd/consoleauth.py nova/cmd/dhcpbridge.py nova/cmd/idmapshift.py nova/cmd/manage.py nova/cmd/network.py nova/cmd/novnc.py nova/cmd/novncproxy.py nova/cmd/scheduler.py nova/cmd/serialproxy.py nova/cmd/spicehtml5proxy.py nova/cmd/xvpvncproxy.py nova/common/__init__.py nova/common/config.py nova/compute/__init__.py nova/compute/api.py nova/compute/arch.py nova/compute/build_results.py nova/compute/cells_api.py nova/compute/claims.py nova/compute/cpumodel.py nova/compute/flavors.py nova/compute/hv_type.py nova/compute/instance_actions.py nova/compute/manager.py nova/compute/opts.py nova/compute/power_state.py nova/compute/resource_tracker.py nova/compute/rpcapi.py nova/compute/stats.py nova/compute/task_states.py nova/compute/utils.py nova/compute/vm_mode.py nova/compute/vm_states.py nova/compute/monitors/__init__.py nova/compute/monitors/base.py nova/compute/monitors/cpu/__init__.py nova/compute/monitors/cpu/virt_driver.py nova/compute/resources/__init__.py nova/compute/resources/base.py nova/conductor/__init__.py nova/conductor/api.py nova/conductor/manager.py nova/conductor/rpcapi.py nova/conductor/tasks/__init__.py nova/conductor/tasks/base.py nova/conductor/tasks/live_migrate.py nova/conductor/tasks/migrate.py nova/conf/__init__.py 
nova/conf/availability_zone.py nova/conf/cells.py nova/conf/cert.py nova/conf/compute.py nova/conf/conductor.py nova/conf/ephemeral_storage.py nova/conf/ironic.py nova/conf/opts.py nova/conf/pci.py nova/conf/scheduler.py nova/conf/serial_console.py nova/conf/virt.py nova/conf/vnc.py nova/conf/wsgi.py nova/console/__init__.py nova/console/api.py nova/console/fake.py nova/console/manager.py nova/console/rpcapi.py nova/console/serial.py nova/console/type.py nova/console/websocketproxy.py nova/console/xvp.conf.template nova/console/xvp.py nova/consoleauth/__init__.py nova/consoleauth/manager.py nova/consoleauth/rpcapi.py nova/db/__init__.py nova/db/api.py nova/db/base.py nova/db/migration.py nova/db/sqlalchemy/__init__.py nova/db/sqlalchemy/api.py nova/db/sqlalchemy/api_models.py nova/db/sqlalchemy/migration.py nova/db/sqlalchemy/models.py nova/db/sqlalchemy/types.py nova/db/sqlalchemy/utils.py nova/db/sqlalchemy/api_migrations/__init__.py nova/db/sqlalchemy/api_migrations/migrate_repo/README nova/db/sqlalchemy/api_migrations/migrate_repo/__init__.py nova/db/sqlalchemy/api_migrations/migrate_repo/migrate.cfg nova/db/sqlalchemy/api_migrations/migrate_repo/versions/001_cell_mapping.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/002_instance_mapping.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/003_host_mapping.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/004_add_request_spec.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/005_flavors.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/006_build_request.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/007_instance_mapping_nullable_cellid.py nova/db/sqlalchemy/api_migrations/migrate_repo/versions/__init__.py nova/db/sqlalchemy/migrate_repo/README nova/db/sqlalchemy/migrate_repo/__init__.py nova/db/sqlalchemy/migrate_repo/manage.py nova/db/sqlalchemy/migrate_repo/migrate.cfg nova/db/sqlalchemy/migrate_repo/versions/216_havana.py nova/db/sqlalchemy/migrate_repo/versions/217_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/218_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/219_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/220_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/221_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/222_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/223_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/224_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/225_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/226_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/227_fix_project_user_quotas_resource_length.py nova/db/sqlalchemy/migrate_repo/versions/228_add_metrics_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/229_add_extra_resources_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/230_add_details_column_to_instance_actions_events.py nova/db/sqlalchemy/migrate_repo/versions/231_add_ephemeral_key_uuid.py nova/db/sqlalchemy/migrate_repo/versions/232_drop_dump_tables.py nova/db/sqlalchemy/migrate_repo/versions/233_add_stats_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py nova/db/sqlalchemy/migrate_repo/versions/235_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/236_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/237_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/238_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/239_placeholder.py 
nova/db/sqlalchemy/migrate_repo/versions/240_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/241_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/242_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/243_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/244_increase_user_id_length_volume_usage_cache.py nova/db/sqlalchemy/migrate_repo/versions/245_add_mtu_and_dhcp_server.py nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py nova/db/sqlalchemy/migrate_repo/versions/252_add_instance_extra_table.py nova/db/sqlalchemy/migrate_repo/versions/253_add_pci_requests_to_instance_extra_table.py nova/db/sqlalchemy/migrate_repo/versions/254_add_request_id_in_pci_devices.py nova/db/sqlalchemy/migrate_repo/versions/255_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/256_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/257_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/258_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/259_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/260_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/261_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/262_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/263_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/264_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/265_remove_duplicated_index.py nova/db/sqlalchemy/migrate_repo/versions/266_add_instance_tags.py nova/db/sqlalchemy/migrate_repo/versions/267_instance_uuid_non_nullable.py nova/db/sqlalchemy/migrate_repo/versions/268_add_host_in_compute_node.py nova/db/sqlalchemy/migrate_repo/versions/269_add_numa_node_column.py nova/db/sqlalchemy/migrate_repo/versions/270_flavor_data_in_extra.py nova/db/sqlalchemy/migrate_repo/versions/271_sqlite_postgresql_indexes.py nova/db/sqlalchemy/migrate_repo/versions/272_add_keypair_type.py nova/db/sqlalchemy/migrate_repo/versions/273_sqlite_foreign_keys.py nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py nova/db/sqlalchemy/migrate_repo/versions/275_add_keypair_type.py nova/db/sqlalchemy/migrate_repo/versions/276_vcpu_model.py nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py nova/db/sqlalchemy/migrate_repo/versions/278_remove_service_fk_in_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/279_fix_unique_constraint_for_compute_node.py nova/db/sqlalchemy/migrate_repo/versions/280_add_nullable_false_to_keypairs_name.py nova/db/sqlalchemy/migrate_repo/versions/281_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/282_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/283_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/284_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/285_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/286_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/287_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/288_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/289_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/290_placeholder.py 
nova/db/sqlalchemy/migrate_repo/versions/291_enforce_flavors_migrated.py nova/db/sqlalchemy/migrate_repo/versions/292_drop_nova_volumes_tables.py nova/db/sqlalchemy/migrate_repo/versions/293_add_migration_type.py nova/db/sqlalchemy/migrate_repo/versions/294_add_service_heartbeat.py nova/db/sqlalchemy/migrate_repo/versions/295_add_virtual_interfaces_uuid_index.py nova/db/sqlalchemy/migrate_repo/versions/296_add_missing_db2_fkeys.py nova/db/sqlalchemy/migrate_repo/versions/297_add_forced_down_for_services.py nova/db/sqlalchemy/migrate_repo/versions/298_mysql_extra_specs_binary_collation.py nova/db/sqlalchemy/migrate_repo/versions/299_service_version_number.py nova/db/sqlalchemy/migrate_repo/versions/300_migration_context.py nova/db/sqlalchemy/migrate_repo/versions/301_add_cpu_and_ram_ratios_for_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/302_pgsql_add_instance_system_metadata_index.py nova/db/sqlalchemy/migrate_repo/versions/303_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/304_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/305_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/306_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/307_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/308_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/309_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/310_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/311_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/312_placeholder.py nova/db/sqlalchemy/migrate_repo/versions/313_add_parent_id_column.py nova/db/sqlalchemy/migrate_repo/versions/314_add_resource_provider_tables.py nova/db/sqlalchemy/migrate_repo/versions/315_add_migration_progresss_detail.py nova/db/sqlalchemy/migrate_repo/versions/316_add_disk_ratio_for_compute_nodes.py nova/db/sqlalchemy/migrate_repo/versions/317_add_aggregate_uuid.py nova/db/sqlalchemy/migrate_repo/versions/318_resource_provider_name_aggregates.py nova/db/sqlalchemy/migrate_repo/versions/319_add_instances_deleted_created_at_index.py nova/db/sqlalchemy/migrate_repo/versions/__init__.py nova/hacking/__init__.py nova/hacking/checks.py nova/image/__init__.py nova/image/api.py nova/image/glance.py nova/image/s3.py nova/image/download/__init__.py nova/image/download/base.py nova/image/download/file.py nova/ipv6/__init__.py nova/ipv6/account_identifier.py nova/ipv6/api.py nova/ipv6/rfc2462.py nova/keymgr/__init__.py nova/keymgr/barbican.py nova/keymgr/conf_key_mgr.py nova/keymgr/key.py nova/keymgr/key_mgr.py nova/keymgr/mock_key_mgr.py nova/keymgr/not_implemented_key_mgr.py nova/keymgr/single_key_mgr.py nova/locale/cs/LC_MESSAGES/nova-log-critical.po nova/locale/cs/LC_MESSAGES/nova-log-error.po nova/locale/cs/LC_MESSAGES/nova-log-info.po nova/locale/cs/LC_MESSAGES/nova-log-warning.po nova/locale/cs/LC_MESSAGES/nova.po nova/locale/de/LC_MESSAGES/nova-log-critical.po nova/locale/de/LC_MESSAGES/nova-log-error.po nova/locale/de/LC_MESSAGES/nova-log-info.po nova/locale/de/LC_MESSAGES/nova.po nova/locale/es/LC_MESSAGES/nova-log-critical.po nova/locale/es/LC_MESSAGES/nova-log-error.po nova/locale/es/LC_MESSAGES/nova-log-info.po nova/locale/es/LC_MESSAGES/nova-log-warning.po nova/locale/es/LC_MESSAGES/nova.po nova/locale/es_MX/LC_MESSAGES/nova-log-critical.po nova/locale/fr/LC_MESSAGES/nova-log-critical.po nova/locale/fr/LC_MESSAGES/nova-log-error.po nova/locale/fr/LC_MESSAGES/nova-log-info.po nova/locale/fr/LC_MESSAGES/nova-log-warning.po nova/locale/fr/LC_MESSAGES/nova.po nova/locale/hr/LC_MESSAGES/nova-log-critical.po 
nova/locale/it/LC_MESSAGES/nova-log-error.po nova/locale/it/LC_MESSAGES/nova-log-info.po nova/locale/it/LC_MESSAGES/nova.po nova/locale/ja/LC_MESSAGES/nova-log-critical.po nova/locale/ja/LC_MESSAGES/nova.po nova/locale/ko_KR/LC_MESSAGES/nova-log-critical.po nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po nova/locale/ko_KR/LC_MESSAGES/nova-log-warning.po nova/locale/ko_KR/LC_MESSAGES/nova.po nova/locale/pa_IN/LC_MESSAGES/nova-log-critical.po nova/locale/pt_BR/LC_MESSAGES/nova-log-critical.po nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po nova/locale/pt_BR/LC_MESSAGES/nova.po nova/locale/ru/LC_MESSAGES/nova.po nova/locale/tr_TR/LC_MESSAGES/nova-log-critical.po nova/locale/tr_TR/LC_MESSAGES/nova-log-error.po nova/locale/tr_TR/LC_MESSAGES/nova-log-info.po nova/locale/tr_TR/LC_MESSAGES/nova-log-warning.po nova/locale/tr_TR/LC_MESSAGES/nova.po nova/locale/zh_CN/LC_MESSAGES/nova-log-critical.po nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po nova/locale/zh_CN/LC_MESSAGES/nova-log-warning.po nova/locale/zh_CN/LC_MESSAGES/nova.po nova/locale/zh_TW/LC_MESSAGES/nova-log-critical.po nova/locale/zh_TW/LC_MESSAGES/nova.po nova/mks/__init__.py nova/network/__init__.py nova/network/api.py nova/network/base_api.py nova/network/dns_driver.py nova/network/driver.py nova/network/floating_ips.py nova/network/l3.py nova/network/ldapdns.py nova/network/linux_net.py nova/network/manager.py nova/network/minidns.py nova/network/model.py nova/network/noop_dns_driver.py nova/network/opts.py nova/network/rpcapi.py nova/network/neutronv2/__init__.py nova/network/neutronv2/api.py nova/network/neutronv2/constants.py nova/network/security_group/__init__.py nova/network/security_group/neutron_driver.py nova/network/security_group/openstack_driver.py nova/network/security_group/security_group_base.py nova/objects/__init__.py nova/objects/agent.py nova/objects/aggregate.py nova/objects/bandwidth_usage.py nova/objects/base.py nova/objects/block_device.py nova/objects/build_request.py nova/objects/cell_mapping.py nova/objects/compute_node.py nova/objects/dns_domain.py nova/objects/ec2.py nova/objects/external_event.py nova/objects/fields.py nova/objects/fixed_ip.py nova/objects/flavor.py nova/objects/floating_ip.py nova/objects/host_mapping.py nova/objects/hv_spec.py nova/objects/image_meta.py nova/objects/instance.py nova/objects/instance_action.py nova/objects/instance_fault.py nova/objects/instance_group.py nova/objects/instance_info_cache.py nova/objects/instance_mapping.py nova/objects/instance_numa_topology.py nova/objects/instance_pci_requests.py nova/objects/keypair.py nova/objects/migrate_data.py nova/objects/migration.py nova/objects/migration_context.py nova/objects/monitor_metric.py nova/objects/network.py nova/objects/network_request.py nova/objects/notification.py nova/objects/numa.py nova/objects/pci_device.py nova/objects/pci_device_pool.py nova/objects/quotas.py nova/objects/request_spec.py nova/objects/resource_provider.py nova/objects/security_group.py nova/objects/security_group_rule.py nova/objects/service.py nova/objects/tag.py nova/objects/task_log.py nova/objects/vcpu_model.py nova/objects/virt_cpu_topology.py nova/objects/virtual_interface.py nova/objects/volume_usage.py nova/openstack/__init__.py nova/openstack/common/README nova/openstack/common/__init__.py nova/openstack/common/_i18n.py nova/openstack/common/cliutils.py nova/pci/__init__.py nova/pci/devspec.py 
nova/pci/manager.py nova/pci/request.py nova/pci/stats.py nova/pci/utils.py nova/pci/whitelist.py nova/rdp/__init__.py nova/scheduler/__init__.py nova/scheduler/caching_scheduler.py nova/scheduler/chance.py nova/scheduler/driver.py nova/scheduler/filter_scheduler.py nova/scheduler/host_manager.py nova/scheduler/ironic_host_manager.py nova/scheduler/manager.py nova/scheduler/rpcapi.py nova/scheduler/scheduler_options.py nova/scheduler/utils.py nova/scheduler/client/__init__.py nova/scheduler/client/query.py nova/scheduler/client/report.py nova/scheduler/filters/__init__.py nova/scheduler/filters/affinity_filter.py nova/scheduler/filters/aggregate_image_properties_isolation.py nova/scheduler/filters/aggregate_instance_extra_specs.py nova/scheduler/filters/aggregate_multitenancy_isolation.py nova/scheduler/filters/all_hosts_filter.py nova/scheduler/filters/availability_zone_filter.py nova/scheduler/filters/compute_capabilities_filter.py nova/scheduler/filters/compute_filter.py nova/scheduler/filters/core_filter.py nova/scheduler/filters/disk_filter.py nova/scheduler/filters/exact_core_filter.py nova/scheduler/filters/exact_disk_filter.py nova/scheduler/filters/exact_ram_filter.py nova/scheduler/filters/extra_specs_ops.py nova/scheduler/filters/image_props_filter.py nova/scheduler/filters/io_ops_filter.py nova/scheduler/filters/isolated_hosts_filter.py nova/scheduler/filters/json_filter.py nova/scheduler/filters/metrics_filter.py nova/scheduler/filters/num_instances_filter.py nova/scheduler/filters/numa_topology_filter.py nova/scheduler/filters/pci_passthrough_filter.py nova/scheduler/filters/ram_filter.py nova/scheduler/filters/retry_filter.py nova/scheduler/filters/trusted_filter.py nova/scheduler/filters/type_filter.py nova/scheduler/filters/utils.py nova/scheduler/weights/__init__.py nova/scheduler/weights/affinity.py nova/scheduler/weights/disk.py nova/scheduler/weights/io_ops.py nova/scheduler/weights/metrics.py nova/scheduler/weights/ram.py nova/servicegroup/__init__.py nova/servicegroup/api.py nova/servicegroup/drivers/__init__.py nova/servicegroup/drivers/base.py nova/servicegroup/drivers/db.py nova/servicegroup/drivers/mc.py nova/spice/__init__.py nova/tests/__init__.py nova/tests/fixtures.py nova/tests/uuidsentinel.py nova/tests/functional/__init__.py nova/tests/functional/api_paste_fixture.py nova/tests/functional/api_samples_test_base.py nova/tests/functional/integrated_helpers.py nova/tests/functional/test_extensions.py nova/tests/functional/test_instance_actions.py nova/tests/functional/test_legacy_v2_compatible_wrapper.py nova/tests/functional/test_login.py nova/tests/functional/test_middleware.py nova/tests/functional/test_server_group.py nova/tests/functional/test_servers.py nova/tests/functional/api/__init__.py nova/tests/functional/api/client.py nova/tests/functional/api_sample_tests/README.rst nova/tests/functional/api_sample_tests/__init__.py nova/tests/functional/api_sample_tests/api_sample_base.py nova/tests/functional/api_sample_tests/test_access_ips.py nova/tests/functional/api_sample_tests/test_admin_actions.py nova/tests/functional/api_sample_tests/test_admin_password.py nova/tests/functional/api_sample_tests/test_agents.py nova/tests/functional/api_sample_tests/test_aggregates.py nova/tests/functional/api_sample_tests/test_assisted_volume_snapshots.py nova/tests/functional/api_sample_tests/test_attach_interfaces.py nova/tests/functional/api_sample_tests/test_availability_zone.py nova/tests/functional/api_sample_tests/test_baremetal_nodes.py 
nova/tests/functional/api_sample_tests/test_block_device_mapping_boot.py nova/tests/functional/api_sample_tests/test_cells.py nova/tests/functional/api_sample_tests/test_certificates.py nova/tests/functional/api_sample_tests/test_cloudpipe.py nova/tests/functional/api_sample_tests/test_config_drive.py nova/tests/functional/api_sample_tests/test_console_auth_tokens.py nova/tests/functional/api_sample_tests/test_console_output.py nova/tests/functional/api_sample_tests/test_consoles.py nova/tests/functional/api_sample_tests/test_create_backup.py nova/tests/functional/api_sample_tests/test_deferred_delete.py nova/tests/functional/api_sample_tests/test_disk_config.py nova/tests/functional/api_sample_tests/test_evacuate.py nova/tests/functional/api_sample_tests/test_extended_availability_zone.py nova/tests/functional/api_sample_tests/test_extended_server_attributes.py nova/tests/functional/api_sample_tests/test_extended_status.py nova/tests/functional/api_sample_tests/test_extended_volumes.py nova/tests/functional/api_sample_tests/test_extension_info.py nova/tests/functional/api_sample_tests/test_fixed_ips.py nova/tests/functional/api_sample_tests/test_flavor_access.py nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py nova/tests/functional/api_sample_tests/test_flavor_manage.py nova/tests/functional/api_sample_tests/test_flavor_rxtx.py nova/tests/functional/api_sample_tests/test_flavors.py nova/tests/functional/api_sample_tests/test_floating_ip_dns.py nova/tests/functional/api_sample_tests/test_floating_ip_pools.py nova/tests/functional/api_sample_tests/test_floating_ips.py nova/tests/functional/api_sample_tests/test_floating_ips_bulk.py nova/tests/functional/api_sample_tests/test_fping.py nova/tests/functional/api_sample_tests/test_hide_server_addresses.py nova/tests/functional/api_sample_tests/test_hosts.py nova/tests/functional/api_sample_tests/test_hypervisors.py nova/tests/functional/api_sample_tests/test_images.py nova/tests/functional/api_sample_tests/test_instance_actions.py nova/tests/functional/api_sample_tests/test_instance_usage_audit_log.py nova/tests/functional/api_sample_tests/test_keypairs.py nova/tests/functional/api_sample_tests/test_limits.py nova/tests/functional/api_sample_tests/test_lock_server.py nova/tests/functional/api_sample_tests/test_migrate_server.py nova/tests/functional/api_sample_tests/test_migrations.py nova/tests/functional/api_sample_tests/test_multinic.py nova/tests/functional/api_sample_tests/test_multiple_create.py nova/tests/functional/api_sample_tests/test_networks.py nova/tests/functional/api_sample_tests/test_networks_associate.py nova/tests/functional/api_sample_tests/test_pause_server.py nova/tests/functional/api_sample_tests/test_pci.py nova/tests/functional/api_sample_tests/test_personality.py nova/tests/functional/api_sample_tests/test_preserve_ephemeral_rebuild.py nova/tests/functional/api_sample_tests/test_quota_classes.py nova/tests/functional/api_sample_tests/test_quota_sets.py nova/tests/functional/api_sample_tests/test_remote_consoles.py nova/tests/functional/api_sample_tests/test_rescue.py nova/tests/functional/api_sample_tests/test_scheduler_hints.py nova/tests/functional/api_sample_tests/test_security_group_default_rules.py nova/tests/functional/api_sample_tests/test_security_groups.py nova/tests/functional/api_sample_tests/test_server_diagnostics.py nova/tests/functional/api_sample_tests/test_server_external_events.py nova/tests/functional/api_sample_tests/test_server_groups.py 
nova/tests/functional/api_sample_tests/test_server_metadata.py nova/tests/functional/api_sample_tests/test_server_migrations.py nova/tests/functional/api_sample_tests/test_server_password.py nova/tests/functional/api_sample_tests/test_server_usage.py nova/tests/functional/api_sample_tests/test_servers.py nova/tests/functional/api_sample_tests/test_servers_ips.py nova/tests/functional/api_sample_tests/test_services.py nova/tests/functional/api_sample_tests/test_shelve.py nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py nova/tests/functional/api_sample_tests/test_suspend_server.py nova/tests/functional/api_sample_tests/test_tenant_networks.py nova/tests/functional/api_sample_tests/test_used_limits.py nova/tests/functional/api_sample_tests/test_user_data.py nova/tests/functional/api_sample_tests/test_versions.py nova/tests/functional/api_sample_tests/test_virtual_interfaces.py nova/tests/functional/api_sample_tests/test_volumes.py nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v2.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v21-compatible.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavor-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-confirm-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-create-image.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-reboot.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-revert-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp-v2.json.tpl nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavors/flavor-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavors/flavors-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/flavors/flavors-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/limits/limit-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/limits/v2-limit-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-admin-password/admin-password-change-password.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-agents/agents-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-get-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-capacities-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-list-empty-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-get-root-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-create-backup/create-backup-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/force-delete-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/restore-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/list-servers-detail-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-resize-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-find-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-status/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-status/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedip-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedip-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fping/fping-get-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-fping/fping-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-reboot.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-shutdown.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-startup.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hosts/hosts-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-with-servers-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-without-servers-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-action-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-lock-server/lock-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-lock-server/unlock-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/live-migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/v2.25/live-migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrations/migrations-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-migrations/v2.23/migrations-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-add-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/network-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-disassociate-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-associate-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-pause-server/pause-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pause-server/unpause-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-index-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-pci/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-console-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-console-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-rescue.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-unrescue.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-unrescue-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-add-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-remove-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-security-groups/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-list-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-password/get-password-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-usage/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-server-usage/servers-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/services-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/services-list-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-shelve-offload.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-shelve.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-unshelve.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-resume.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-suspend.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-list-res.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-res.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/os-used-limits/usedlimits-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-used-limits/v2-usedlimits-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp-v2.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/list-volume-attachments-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-index-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-show-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/update-volume-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-network-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-get.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-index.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-confirm-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-create-image.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-reboot.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/servers/server-action-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-revert-resize.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-start.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-action-stop.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-create-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-create-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-update-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/server-update-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers-sort/server-sort-keys-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-crash-dump.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/server-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-details-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-list-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/versions/v2-version-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl nova/tests/functional/db/__init__.py nova/tests/functional/db/test_archive.py nova/tests/functional/db/test_build_request.py nova/tests/functional/db/test_cell_mapping.py nova/tests/functional/db/test_connection_switch.py nova/tests/functional/db/test_flavor_model.py nova/tests/functional/db/test_host_mapping.py nova/tests/functional/db/test_instance_mapping.py nova/tests/functional/db/test_request_spec.py nova/tests/functional/db/test_resource_provider.py nova/tests/functional/db/api/__init__.py nova/tests/functional/db/api/test_migrations.py nova/tests/functional/libvirt/__init__.py nova/tests/functional/libvirt/test_numa_servers.py nova/tests/functional/libvirt/test_rt_servers.py nova/tests/functional/notification_sample_tests/__init__.py nova/tests/functional/notification_sample_tests/notification_sample_base.py 
nova/tests/functional/notification_sample_tests/test_service_update.py nova/tests/functional/regressions/README.rst nova/tests/functional/regressions/__init__.py nova/tests/functional/regressions/test_bug_1522536.py nova/tests/functional/regressions/test_bug_1541691.py nova/tests/functional/regressions/test_bug_1548980.py nova/tests/functional/regressions/test_bug_1552888.py nova/tests/functional/regressions/test_bug_1558866.py nova/tests/functional/wsgi/__init__.py nova/tests/functional/wsgi/test_flavor_manage.py nova/tests/functional/wsgi/test_secgroup.py nova/tests/live_migration/hooks/ceph.sh nova/tests/live_migration/hooks/nfs.sh nova/tests/live_migration/hooks/run_tests.sh nova/tests/live_migration/hooks/utils.sh nova/tests/unit/README.rst nova/tests/unit/__init__.py nova/tests/unit/cast_as_call.py nova/tests/unit/conf_fixture.py nova/tests/unit/fake_block_device.py nova/tests/unit/fake_build_request.py nova/tests/unit/fake_crypto.py nova/tests/unit/fake_flavor.py nova/tests/unit/fake_hosts.py nova/tests/unit/fake_instance.py nova/tests/unit/fake_ldap.py nova/tests/unit/fake_network.py nova/tests/unit/fake_network_cache_model.py nova/tests/unit/fake_notifier.py nova/tests/unit/fake_pci_device_pools.py nova/tests/unit/fake_policy.py nova/tests/unit/fake_processutils.py nova/tests/unit/fake_request_spec.py nova/tests/unit/fake_server_actions.py nova/tests/unit/fake_utils.py nova/tests/unit/fake_volume.py nova/tests/unit/image_fixtures.py nova/tests/unit/matchers.py nova/tests/unit/policy_fixture.py nova/tests/unit/test_api_validation.py nova/tests/unit/test_availability_zones.py nova/tests/unit/test_baserpc.py nova/tests/unit/test_block_device.py nova/tests/unit/test_cache.py nova/tests/unit/test_cinder.py nova/tests/unit/test_configdrive2.py nova/tests/unit/test_context.py nova/tests/unit/test_crypto.py nova/tests/unit/test_exception.py nova/tests/unit/test_fixtures.py nova/tests/unit/test_flavors.py nova/tests/unit/test_hacking.py nova/tests/unit/test_hooks.py nova/tests/unit/test_instance_types_extra_specs.py nova/tests/unit/test_iptables_network.py nova/tests/unit/test_ipv6.py nova/tests/unit/test_loadables.py nova/tests/unit/test_matchers.py nova/tests/unit/test_metadata.py nova/tests/unit/test_notifications.py nova/tests/unit/test_notifier.py nova/tests/unit/test_nova_manage.py nova/tests/unit/test_pipelib.py nova/tests/unit/test_policy.py nova/tests/unit/test_quota.py nova/tests/unit/test_rpc.py nova/tests/unit/test_safeutils.py nova/tests/unit/test_service.py nova/tests/unit/test_signature_utils.py nova/tests/unit/test_test.py nova/tests/unit/test_test_utils.py nova/tests/unit/test_utils.py nova/tests/unit/test_uuid_sentinels.py nova/tests/unit/test_versions.py nova/tests/unit/test_weights.py nova/tests/unit/test_wsgi.py nova/tests/unit/utils.py nova/tests/unit/api/__init__.py nova/tests/unit/api/test_auth.py nova/tests/unit/api/test_compute_req_id.py nova/tests/unit/api/test_validator.py nova/tests/unit/api/test_wsgi.py nova/tests/unit/api/openstack/__init__.py nova/tests/unit/api/openstack/common.py nova/tests/unit/api/openstack/fakes.py nova/tests/unit/api/openstack/test_api_version_request.py nova/tests/unit/api/openstack/test_common.py nova/tests/unit/api/openstack/test_faults.py nova/tests/unit/api/openstack/test_legacy_v2_compatible_wrapper.py nova/tests/unit/api/openstack/test_mapper.py nova/tests/unit/api/openstack/test_wsgi.py nova/tests/unit/api/openstack/compute/__init__.py nova/tests/unit/api/openstack/compute/admin_only_action_common.py 
nova/tests/unit/api/openstack/compute/basic.py nova/tests/unit/api/openstack/compute/dummy_schema.py nova/tests/unit/api/openstack/compute/microversions.py nova/tests/unit/api/openstack/compute/test_access_ips.py nova/tests/unit/api/openstack/compute/test_admin_actions.py nova/tests/unit/api/openstack/compute/test_admin_password.py nova/tests/unit/api/openstack/compute/test_agents.py nova/tests/unit/api/openstack/compute/test_aggregates.py nova/tests/unit/api/openstack/compute/test_api.py nova/tests/unit/api/openstack/compute/test_attach_interfaces.py nova/tests/unit/api/openstack/compute/test_auth.py nova/tests/unit/api/openstack/compute/test_availability_zone.py nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py nova/tests/unit/api/openstack/compute/test_block_device_mapping.py nova/tests/unit/api/openstack/compute/test_block_device_mapping_v1.py nova/tests/unit/api/openstack/compute/test_cells.py nova/tests/unit/api/openstack/compute/test_certificates.py nova/tests/unit/api/openstack/compute/test_cloudpipe.py nova/tests/unit/api/openstack/compute/test_cloudpipe_update.py nova/tests/unit/api/openstack/compute/test_config_drive.py nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py nova/tests/unit/api/openstack/compute/test_console_output.py nova/tests/unit/api/openstack/compute/test_consoles.py nova/tests/unit/api/openstack/compute/test_create_backup.py nova/tests/unit/api/openstack/compute/test_createserverext.py nova/tests/unit/api/openstack/compute/test_deferred_delete.py nova/tests/unit/api/openstack/compute/test_disk_config.py nova/tests/unit/api/openstack/compute/test_evacuate.py nova/tests/unit/api/openstack/compute/test_extended_availability_zone.py nova/tests/unit/api/openstack/compute/test_extended_hypervisors.py nova/tests/unit/api/openstack/compute/test_extended_ips.py nova/tests/unit/api/openstack/compute/test_extended_ips_mac.py nova/tests/unit/api/openstack/compute/test_extended_rescue_with_image.py nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py nova/tests/unit/api/openstack/compute/test_extended_status.py nova/tests/unit/api/openstack/compute/test_extended_virtual_interfaces_net.py nova/tests/unit/api/openstack/compute/test_extended_volumes.py nova/tests/unit/api/openstack/compute/test_extension_info.py nova/tests/unit/api/openstack/compute/test_extensions.py nova/tests/unit/api/openstack/compute/test_fixed_ips.py nova/tests/unit/api/openstack/compute/test_flavor_access.py nova/tests/unit/api/openstack/compute/test_flavor_disabled.py nova/tests/unit/api/openstack/compute/test_flavor_manage.py nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py nova/tests/unit/api/openstack/compute/test_flavor_swap.py nova/tests/unit/api/openstack/compute/test_flavorextradata.py nova/tests/unit/api/openstack/compute/test_flavors.py nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py nova/tests/unit/api/openstack/compute/test_floating_ip_dns.py nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py nova/tests/unit/api/openstack/compute/test_floating_ips.py nova/tests/unit/api/openstack/compute/test_floating_ips_bulk.py nova/tests/unit/api/openstack/compute/test_fping.py nova/tests/unit/api/openstack/compute/test_hide_server_addresses.py nova/tests/unit/api/openstack/compute/test_hosts.py nova/tests/unit/api/openstack/compute/test_hypervisor_status.py nova/tests/unit/api/openstack/compute/test_hypervisors.py nova/tests/unit/api/openstack/compute/test_image_metadata.py 
nova/tests/unit/api/openstack/compute/test_image_size.py nova/tests/unit/api/openstack/compute/test_images.py nova/tests/unit/api/openstack/compute/test_instance_actions.py nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py nova/tests/unit/api/openstack/compute/test_keypairs.py nova/tests/unit/api/openstack/compute/test_limits.py nova/tests/unit/api/openstack/compute/test_lock_server.py nova/tests/unit/api/openstack/compute/test_microversions.py nova/tests/unit/api/openstack/compute/test_migrate_server.py nova/tests/unit/api/openstack/compute/test_migrations.py nova/tests/unit/api/openstack/compute/test_multinic.py nova/tests/unit/api/openstack/compute/test_multiple_create.py nova/tests/unit/api/openstack/compute/test_networks.py nova/tests/unit/api/openstack/compute/test_neutron_security_groups.py nova/tests/unit/api/openstack/compute/test_pause_server.py nova/tests/unit/api/openstack/compute/test_pci.py nova/tests/unit/api/openstack/compute/test_plugin_framework.py nova/tests/unit/api/openstack/compute/test_quota_classes.py nova/tests/unit/api/openstack/compute/test_quotas.py nova/tests/unit/api/openstack/compute/test_remote_consoles.py nova/tests/unit/api/openstack/compute/test_rescue.py nova/tests/unit/api/openstack/compute/test_scheduler_hints.py nova/tests/unit/api/openstack/compute/test_security_group_default_rules.py nova/tests/unit/api/openstack/compute/test_security_groups.py nova/tests/unit/api/openstack/compute/test_server_actions.py nova/tests/unit/api/openstack/compute/test_server_diagnostics.py nova/tests/unit/api/openstack/compute/test_server_external_events.py nova/tests/unit/api/openstack/compute/test_server_group_quotas.py nova/tests/unit/api/openstack/compute/test_server_groups.py nova/tests/unit/api/openstack/compute/test_server_metadata.py nova/tests/unit/api/openstack/compute/test_server_migrations.py nova/tests/unit/api/openstack/compute/test_server_password.py nova/tests/unit/api/openstack/compute/test_server_reset_state.py nova/tests/unit/api/openstack/compute/test_server_start_stop.py nova/tests/unit/api/openstack/compute/test_server_usage.py nova/tests/unit/api/openstack/compute/test_serversV21.py nova/tests/unit/api/openstack/compute/test_services.py nova/tests/unit/api/openstack/compute/test_shelve.py nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py nova/tests/unit/api/openstack/compute/test_snapshots.py nova/tests/unit/api/openstack/compute/test_suspend_server.py nova/tests/unit/api/openstack/compute/test_tenant_networks.py nova/tests/unit/api/openstack/compute/test_urlmap.py nova/tests/unit/api/openstack/compute/test_used_limits.py nova/tests/unit/api/openstack/compute/test_user_data.py nova/tests/unit/api/openstack/compute/test_versions.py nova/tests/unit/api/openstack/compute/test_virtual_interfaces.py nova/tests/unit/api/openstack/compute/test_volumes.py nova/tests/unit/api/openstack/compute/legacy_v2/__init__.py nova/tests/unit/api/openstack/compute/legacy_v2/test_auth.py nova/tests/unit/api/openstack/compute/legacy_v2/test_extensions.py nova/tests/unit/api/openstack/compute/legacy_v2/test_servers.py nova/tests/unit/api/openstack/compute/legacy_v2/extensions/__init__.py nova/tests/unit/api/openstack/compute/legacy_v2/extensions/foxinsocks.py nova/tests/unit/api_samples_test_base/__init__.py nova/tests/unit/api_samples_test_base/test_compare_result.py nova/tests/unit/cells/__init__.py nova/tests/unit/cells/fakes.py nova/tests/unit/cells/test_cells_filters.py nova/tests/unit/cells/test_cells_manager.py 
nova/tests/unit/cells/test_cells_messaging.py nova/tests/unit/cells/test_cells_rpc_driver.py nova/tests/unit/cells/test_cells_rpcapi.py nova/tests/unit/cells/test_cells_scheduler.py nova/tests/unit/cells/test_cells_state_manager.py nova/tests/unit/cells/test_cells_utils.py nova/tests/unit/cells/test_cells_weights.py nova/tests/unit/cert/__init__.py nova/tests/unit/cert/test_rpcapi.py nova/tests/unit/cmd/__init__.py nova/tests/unit/cmd/test_baseproxy.py nova/tests/unit/cmd/test_idmapshift.py nova/tests/unit/cmd/test_manage.py nova/tests/unit/cmd/test_nova_api.py nova/tests/unit/compute/__init__.py nova/tests/unit/compute/eventlet_utils.py nova/tests/unit/compute/fake_resource_tracker.py nova/tests/unit/compute/test_arch.py nova/tests/unit/compute/test_claims.py nova/tests/unit/compute/test_compute.py nova/tests/unit/compute/test_compute_api.py nova/tests/unit/compute/test_compute_cells.py nova/tests/unit/compute/test_compute_mgr.py nova/tests/unit/compute/test_compute_utils.py nova/tests/unit/compute/test_compute_xen.py nova/tests/unit/compute/test_flavors.py nova/tests/unit/compute/test_host_api.py nova/tests/unit/compute/test_hvtype.py nova/tests/unit/compute/test_keypairs.py nova/tests/unit/compute/test_multiple_nodes.py nova/tests/unit/compute/test_resource_tracker.py nova/tests/unit/compute/test_resources.py nova/tests/unit/compute/test_rpcapi.py nova/tests/unit/compute/test_shelve.py nova/tests/unit/compute/test_stats.py nova/tests/unit/compute/test_tracker.py nova/tests/unit/compute/test_virtapi.py nova/tests/unit/compute/test_vmmode.py nova/tests/unit/compute/monitors/__init__.py nova/tests/unit/compute/monitors/test_monitors.py nova/tests/unit/compute/monitors/cpu/__init__.py nova/tests/unit/compute/monitors/cpu/test_virt_driver.py nova/tests/unit/conductor/__init__.py nova/tests/unit/conductor/test_conductor.py nova/tests/unit/conductor/tasks/__init__.py nova/tests/unit/conductor/tasks/test_base.py nova/tests/unit/conductor/tasks/test_live_migrate.py nova/tests/unit/conductor/tasks/test_migrate.py nova/tests/unit/console/__init__.py nova/tests/unit/console/test_console.py nova/tests/unit/console/test_rpcapi.py nova/tests/unit/console/test_serial.py nova/tests/unit/console/test_type.py nova/tests/unit/console/test_websocketproxy.py nova/tests/unit/consoleauth/__init__.py nova/tests/unit/consoleauth/test_consoleauth.py nova/tests/unit/consoleauth/test_rpcapi.py nova/tests/unit/db/__init__.py nova/tests/unit/db/fakes.py nova/tests/unit/db/test_db_api.py nova/tests/unit/db/test_migration_utils.py nova/tests/unit/db/test_migrations.py nova/tests/unit/db/test_models.py nova/tests/unit/db/test_sqlalchemy_migration.py nova/tests/unit/fake_loadables/__init__.py nova/tests/unit/fake_loadables/fake_loadable1.py nova/tests/unit/fake_loadables/fake_loadable2.py nova/tests/unit/image/__init__.py nova/tests/unit/image/abs.tar.gz nova/tests/unit/image/fake.py nova/tests/unit/image/rel.tar.gz nova/tests/unit/image/test_fake.py nova/tests/unit/image/test_glance.py nova/tests/unit/image/test_transfer_modules.py nova/tests/unit/keymgr/__init__.py nova/tests/unit/keymgr/fake.py nova/tests/unit/keymgr/test_barbican.py nova/tests/unit/keymgr/test_conf_key_mgr.py nova/tests/unit/keymgr/test_key.py nova/tests/unit/keymgr/test_key_mgr.py nova/tests/unit/keymgr/test_mock_key_mgr.py nova/tests/unit/keymgr/test_not_implemented_key_mgr.py nova/tests/unit/keymgr/test_single_key_mgr.py nova/tests/unit/monkey_patch_example/__init__.py nova/tests/unit/monkey_patch_example/example_a.py 
nova/tests/unit/monkey_patch_example/example_b.py nova/tests/unit/network/__init__.py nova/tests/unit/network/interfaces-override.template nova/tests/unit/network/test_api.py nova/tests/unit/network/test_config.py nova/tests/unit/network/test_l3.py nova/tests/unit/network/test_linux_net.py nova/tests/unit/network/test_manager.py nova/tests/unit/network/test_network_info.py nova/tests/unit/network/test_neutronv2.py nova/tests/unit/network/test_rpcapi.py nova/tests/unit/network/security_group/__init__.py nova/tests/unit/network/security_group/test_neutron_driver.py nova/tests/unit/objects/__init__.py nova/tests/unit/objects/test_agent.py nova/tests/unit/objects/test_aggregate.py nova/tests/unit/objects/test_bandwidth_usage.py nova/tests/unit/objects/test_block_device.py nova/tests/unit/objects/test_build_request.py nova/tests/unit/objects/test_cell_mapping.py nova/tests/unit/objects/test_compute_node.py nova/tests/unit/objects/test_dns_domain.py nova/tests/unit/objects/test_ec2.py nova/tests/unit/objects/test_external_event.py nova/tests/unit/objects/test_fields.py nova/tests/unit/objects/test_fixed_ip.py nova/tests/unit/objects/test_flavor.py nova/tests/unit/objects/test_floating_ip.py nova/tests/unit/objects/test_host_mapping.py nova/tests/unit/objects/test_hv_spec.py nova/tests/unit/objects/test_image_meta.py nova/tests/unit/objects/test_instance.py nova/tests/unit/objects/test_instance_action.py nova/tests/unit/objects/test_instance_fault.py nova/tests/unit/objects/test_instance_group.py nova/tests/unit/objects/test_instance_info_cache.py nova/tests/unit/objects/test_instance_mapping.py nova/tests/unit/objects/test_instance_numa_topology.py nova/tests/unit/objects/test_instance_pci_requests.py nova/tests/unit/objects/test_keypair.py nova/tests/unit/objects/test_migrate_data.py nova/tests/unit/objects/test_migration.py nova/tests/unit/objects/test_migration_context.py nova/tests/unit/objects/test_monitor_metric.py nova/tests/unit/objects/test_network.py nova/tests/unit/objects/test_network_request.py nova/tests/unit/objects/test_notification.py nova/tests/unit/objects/test_numa.py nova/tests/unit/objects/test_objects.py nova/tests/unit/objects/test_pci_device.py nova/tests/unit/objects/test_pci_device_pool.py nova/tests/unit/objects/test_quotas.py nova/tests/unit/objects/test_request_spec.py nova/tests/unit/objects/test_resource_provider.py nova/tests/unit/objects/test_security_group.py nova/tests/unit/objects/test_security_group_rule.py nova/tests/unit/objects/test_service.py nova/tests/unit/objects/test_tag.py nova/tests/unit/objects/test_task_log.py nova/tests/unit/objects/test_vcpu_model.py nova/tests/unit/objects/test_virt_cpu_topology.py nova/tests/unit/objects/test_virtual_interface.py nova/tests/unit/objects/test_volume_usage.py nova/tests/unit/pci/__init__.py nova/tests/unit/pci/fakes.py nova/tests/unit/pci/test_devspec.py nova/tests/unit/pci/test_manager.py nova/tests/unit/pci/test_request.py nova/tests/unit/pci/test_stats.py nova/tests/unit/pci/test_utils.py nova/tests/unit/pci/test_whitelist.py nova/tests/unit/scheduler/__init__.py nova/tests/unit/scheduler/fakes.py nova/tests/unit/scheduler/ironic_fakes.py nova/tests/unit/scheduler/test_caching_scheduler.py nova/tests/unit/scheduler/test_chance_scheduler.py nova/tests/unit/scheduler/test_client.py nova/tests/unit/scheduler/test_filter_scheduler.py nova/tests/unit/scheduler/test_filters.py nova/tests/unit/scheduler/test_host_filters.py nova/tests/unit/scheduler/test_host_manager.py 
nova/tests/unit/scheduler/test_ironic_host_manager.py nova/tests/unit/scheduler/test_rpcapi.py nova/tests/unit/scheduler/test_scheduler.py nova/tests/unit/scheduler/test_scheduler_options.py nova/tests/unit/scheduler/test_scheduler_utils.py nova/tests/unit/scheduler/filters/__init__.py nova/tests/unit/scheduler/filters/test_affinity_filters.py nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py nova/tests/unit/scheduler/filters/test_availability_zone_filters.py nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py nova/tests/unit/scheduler/filters/test_compute_filters.py nova/tests/unit/scheduler/filters/test_core_filters.py nova/tests/unit/scheduler/filters/test_disk_filters.py nova/tests/unit/scheduler/filters/test_exact_core_filter.py nova/tests/unit/scheduler/filters/test_exact_disk_filter.py nova/tests/unit/scheduler/filters/test_exact_ram_filter.py nova/tests/unit/scheduler/filters/test_extra_specs_ops.py nova/tests/unit/scheduler/filters/test_image_props_filters.py nova/tests/unit/scheduler/filters/test_io_ops_filters.py nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py nova/tests/unit/scheduler/filters/test_json_filters.py nova/tests/unit/scheduler/filters/test_metrics_filters.py nova/tests/unit/scheduler/filters/test_num_instances_filters.py nova/tests/unit/scheduler/filters/test_numa_topology_filters.py nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py nova/tests/unit/scheduler/filters/test_ram_filters.py nova/tests/unit/scheduler/filters/test_retry_filters.py nova/tests/unit/scheduler/filters/test_trusted_filters.py nova/tests/unit/scheduler/filters/test_type_filters.py nova/tests/unit/scheduler/filters/test_utils.py nova/tests/unit/scheduler/weights/__init__.py nova/tests/unit/scheduler/weights/test_weights_affinity.py nova/tests/unit/scheduler/weights/test_weights_disk.py nova/tests/unit/scheduler/weights/test_weights_hosts.py nova/tests/unit/scheduler/weights/test_weights_ioopsweight.py nova/tests/unit/scheduler/weights/test_weights_metrics.py nova/tests/unit/scheduler/weights/test_weights_ram.py nova/tests/unit/servicegroup/__init__.py nova/tests/unit/servicegroup/test_api.py nova/tests/unit/servicegroup/test_db_servicegroup.py nova/tests/unit/servicegroup/test_mc_servicegroup.py nova/tests/unit/ssl_cert/ca.crt nova/tests/unit/ssl_cert/ca.key nova/tests/unit/ssl_cert/certificate.crt nova/tests/unit/ssl_cert/privatekey.key nova/tests/unit/virt/__init__.py nova/tests/unit/virt/fakelibosinfo.py nova/tests/unit/virt/test_block_device.py nova/tests/unit/virt/test_configdrive.py nova/tests/unit/virt/test_diagnostics.py nova/tests/unit/virt/test_driver.py nova/tests/unit/virt/test_events.py nova/tests/unit/virt/test_fake.py nova/tests/unit/virt/test_hardware.py nova/tests/unit/virt/test_imagecache.py nova/tests/unit/virt/test_images.py nova/tests/unit/virt/test_osinfo.py nova/tests/unit/virt/test_virt.py nova/tests/unit/virt/test_virt_drivers.py nova/tests/unit/virt/test_volumeutils.py nova/tests/unit/virt/disk/__init__.py nova/tests/unit/virt/disk/test_api.py nova/tests/unit/virt/disk/test_inject.py nova/tests/unit/virt/disk/mount/__init__.py nova/tests/unit/virt/disk/mount/test_api.py nova/tests/unit/virt/disk/mount/test_block.py nova/tests/unit/virt/disk/mount/test_loop.py nova/tests/unit/virt/disk/mount/test_nbd.py 
nova/tests/unit/virt/disk/vfs/__init__.py nova/tests/unit/virt/disk/vfs/fakeguestfs.py nova/tests/unit/virt/disk/vfs/test_guestfs.py nova/tests/unit/virt/disk/vfs/test_localfs.py nova/tests/unit/virt/hyperv/__init__.py nova/tests/unit/virt/hyperv/test_base.py nova/tests/unit/virt/hyperv/test_driver.py nova/tests/unit/virt/hyperv/test_eventhandler.py nova/tests/unit/virt/hyperv/test_hostops.py nova/tests/unit/virt/hyperv/test_imagecache.py nova/tests/unit/virt/hyperv/test_livemigrationops.py nova/tests/unit/virt/hyperv/test_migrationops.py nova/tests/unit/virt/hyperv/test_pathutils.py nova/tests/unit/virt/hyperv/test_rdpconsoleops.py nova/tests/unit/virt/hyperv/test_snapshotops.py nova/tests/unit/virt/hyperv/test_vif.py nova/tests/unit/virt/hyperv/test_vmops.py nova/tests/unit/virt/hyperv/test_volumeops.py nova/tests/unit/virt/image/__init__.py nova/tests/unit/virt/image/test_model.py nova/tests/unit/virt/ironic/__init__.py nova/tests/unit/virt/ironic/test_client_wrapper.py nova/tests/unit/virt/ironic/test_driver.py nova/tests/unit/virt/ironic/test_patcher.py nova/tests/unit/virt/ironic/utils.py nova/tests/unit/virt/libvirt/__init__.py nova/tests/unit/virt/libvirt/fake_imagebackend.py nova/tests/unit/virt/libvirt/fake_libvirt_utils.py nova/tests/unit/virt/libvirt/fake_os_brick_connector.py nova/tests/unit/virt/libvirt/fakelibvirt.py nova/tests/unit/virt/libvirt/test_blockinfo.py nova/tests/unit/virt/libvirt/test_compat.py nova/tests/unit/virt/libvirt/test_config.py nova/tests/unit/virt/libvirt/test_designer.py nova/tests/unit/virt/libvirt/test_driver.py nova/tests/unit/virt/libvirt/test_fakelibvirt.py nova/tests/unit/virt/libvirt/test_firewall.py nova/tests/unit/virt/libvirt/test_guest.py nova/tests/unit/virt/libvirt/test_host.py nova/tests/unit/virt/libvirt/test_imagebackend.py nova/tests/unit/virt/libvirt/test_imagecache.py nova/tests/unit/virt/libvirt/test_utils.py nova/tests/unit/virt/libvirt/test_vif.py nova/tests/unit/virt/libvirt/storage/__init__.py nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py nova/tests/unit/virt/libvirt/storage/test_lvm.py nova/tests/unit/virt/libvirt/storage/test_rbd.py nova/tests/unit/virt/libvirt/volume/__init__.py nova/tests/unit/virt/libvirt/volume/test_aoe.py nova/tests/unit/virt/libvirt/volume/test_disco.py nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py nova/tests/unit/virt/libvirt/volume/test_fs.py nova/tests/unit/virt/libvirt/volume/test_glusterfs.py nova/tests/unit/virt/libvirt/volume/test_gpfs.py nova/tests/unit/virt/libvirt/volume/test_hgst.py nova/tests/unit/virt/libvirt/volume/test_iscsi.py nova/tests/unit/virt/libvirt/volume/test_iser.py nova/tests/unit/virt/libvirt/volume/test_net.py nova/tests/unit/virt/libvirt/volume/test_nfs.py nova/tests/unit/virt/libvirt/volume/test_quobyte.py nova/tests/unit/virt/libvirt/volume/test_remotefs.py nova/tests/unit/virt/libvirt/volume/test_scaleio.py nova/tests/unit/virt/libvirt/volume/test_scality.py nova/tests/unit/virt/libvirt/volume/test_smbfs.py nova/tests/unit/virt/libvirt/volume/test_volume.py nova/tests/unit/virt/vmwareapi/__init__.py nova/tests/unit/virt/vmwareapi/fake.py nova/tests/unit/virt/vmwareapi/ovf.xml nova/tests/unit/virt/vmwareapi/stubs.py nova/tests/unit/virt/vmwareapi/test_configdrive.py nova/tests/unit/virt/vmwareapi/test_driver_api.py nova/tests/unit/virt/vmwareapi/test_ds_util.py nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py nova/tests/unit/virt/vmwareapi/test_imagecache.py nova/tests/unit/virt/vmwareapi/test_images.py 
nova/tests/unit/virt/vmwareapi/test_network_util.py nova/tests/unit/virt/vmwareapi/test_read_write_util.py nova/tests/unit/virt/vmwareapi/test_vif.py nova/tests/unit/virt/vmwareapi/test_vim_util.py nova/tests/unit/virt/vmwareapi/test_vm_util.py nova/tests/unit/virt/vmwareapi/test_vmops.py nova/tests/unit/virt/vmwareapi/test_volumeops.py nova/tests/unit/virt/xenapi/__init__.py nova/tests/unit/virt/xenapi/stubs.py nova/tests/unit/virt/xenapi/test_agent.py nova/tests/unit/virt/xenapi/test_driver.py nova/tests/unit/virt/xenapi/test_network_utils.py nova/tests/unit/virt/xenapi/test_vif.py nova/tests/unit/virt/xenapi/test_vm_utils.py nova/tests/unit/virt/xenapi/test_vmops.py nova/tests/unit/virt/xenapi/test_volume_utils.py nova/tests/unit/virt/xenapi/test_volumeops.py nova/tests/unit/virt/xenapi/test_xenapi.py nova/tests/unit/virt/xenapi/vm_rrd.xml nova/tests/unit/virt/xenapi/client/__init__.py nova/tests/unit/virt/xenapi/client/test_objects.py nova/tests/unit/virt/xenapi/client/test_session.py nova/tests/unit/virt/xenapi/image/__init__.py nova/tests/unit/virt/xenapi/image/test_bittorrent.py nova/tests/unit/virt/xenapi/image/test_glance.py nova/tests/unit/virt/xenapi/image/test_utils.py nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py nova/tests/unit/virt/xenapi/plugins/__init__.py nova/tests/unit/virt/xenapi/plugins/plugin_test.py nova/tests/unit/virt/xenapi/plugins/test_nova_plugin_version.py nova/tests/unit/volume/__init__.py nova/tests/unit/volume/test_cinder.py nova/tests/unit/volume/encryptors/__init__.py nova/tests/unit/volume/encryptors/test_base.py nova/tests/unit/volume/encryptors/test_cryptsetup.py nova/tests/unit/volume/encryptors/test_luks.py nova/tests/unit/volume/encryptors/test_nop.py nova/virt/__init__.py nova/virt/block_device.py nova/virt/configdrive.py nova/virt/diagnostics.py nova/virt/driver.py nova/virt/event.py nova/virt/fake.py nova/virt/firewall.py nova/virt/hardware.py nova/virt/imagecache.py nova/virt/images.py nova/virt/interfaces.template nova/virt/netutils.py nova/virt/opts.py nova/virt/osinfo.py nova/virt/storage_users.py nova/virt/virtapi.py nova/virt/volumeutils.py nova/virt/watchdog_actions.py nova/virt/disk/__init__.py nova/virt/disk/api.py nova/virt/disk/mount/__init__.py nova/virt/disk/mount/api.py nova/virt/disk/mount/block.py nova/virt/disk/mount/loop.py nova/virt/disk/mount/nbd.py nova/virt/disk/vfs/__init__.py nova/virt/disk/vfs/api.py nova/virt/disk/vfs/guestfs.py nova/virt/disk/vfs/localfs.py nova/virt/hyperv/README.rst nova/virt/hyperv/__init__.py nova/virt/hyperv/constants.py nova/virt/hyperv/driver.py nova/virt/hyperv/eventhandler.py nova/virt/hyperv/hostops.py nova/virt/hyperv/imagecache.py nova/virt/hyperv/livemigrationops.py nova/virt/hyperv/migrationops.py nova/virt/hyperv/pathutils.py nova/virt/hyperv/rdpconsoleops.py nova/virt/hyperv/snapshotops.py nova/virt/hyperv/vif.py nova/virt/hyperv/vmops.py nova/virt/hyperv/volumeops.py nova/virt/image/__init__.py nova/virt/image/model.py nova/virt/ironic/__init__.py nova/virt/ironic/client_wrapper.py nova/virt/ironic/driver.py nova/virt/ironic/ironic_states.py nova/virt/ironic/patcher.py nova/virt/libvirt/__init__.py nova/virt/libvirt/blockinfo.py nova/virt/libvirt/compat.py nova/virt/libvirt/config.py nova/virt/libvirt/designer.py nova/virt/libvirt/driver.py nova/virt/libvirt/firewall.py nova/virt/libvirt/guest.py nova/virt/libvirt/host.py nova/virt/libvirt/imagebackend.py nova/virt/libvirt/imagecache.py nova/virt/libvirt/instancejobtracker.py nova/virt/libvirt/utils.py 
nova/virt/libvirt/vif.py nova/virt/libvirt/storage/__init__.py nova/virt/libvirt/storage/dmcrypt.py nova/virt/libvirt/storage/lvm.py nova/virt/libvirt/storage/rbd_utils.py nova/virt/libvirt/volume/__init__.py nova/virt/libvirt/volume/aoe.py nova/virt/libvirt/volume/disco.py nova/virt/libvirt/volume/fibrechannel.py nova/virt/libvirt/volume/fs.py nova/virt/libvirt/volume/glusterfs.py nova/virt/libvirt/volume/gpfs.py nova/virt/libvirt/volume/hgst.py nova/virt/libvirt/volume/iscsi.py nova/virt/libvirt/volume/iser.py nova/virt/libvirt/volume/net.py nova/virt/libvirt/volume/nfs.py nova/virt/libvirt/volume/quobyte.py nova/virt/libvirt/volume/remotefs.py nova/virt/libvirt/volume/scaleio.py nova/virt/libvirt/volume/scality.py nova/virt/libvirt/volume/smbfs.py nova/virt/libvirt/volume/volume.py nova/virt/vmwareapi/__init__.py nova/virt/vmwareapi/constants.py nova/virt/vmwareapi/driver.py nova/virt/vmwareapi/ds_util.py nova/virt/vmwareapi/error_util.py nova/virt/vmwareapi/host.py nova/virt/vmwareapi/imagecache.py nova/virt/vmwareapi/images.py nova/virt/vmwareapi/network_util.py nova/virt/vmwareapi/read_write_util.py nova/virt/vmwareapi/vif.py nova/virt/vmwareapi/vim_util.py nova/virt/vmwareapi/vm_util.py nova/virt/vmwareapi/vmops.py nova/virt/vmwareapi/volumeops.py nova/virt/xenapi/__init__.py nova/virt/xenapi/agent.py nova/virt/xenapi/driver.py nova/virt/xenapi/fake.py nova/virt/xenapi/firewall.py nova/virt/xenapi/host.py nova/virt/xenapi/network_utils.py nova/virt/xenapi/pool.py nova/virt/xenapi/pool_states.py nova/virt/xenapi/vif.py nova/virt/xenapi/vm_utils.py nova/virt/xenapi/vmops.py nova/virt/xenapi/volume_utils.py nova/virt/xenapi/volumeops.py nova/virt/xenapi/client/__init__.py nova/virt/xenapi/client/objects.py nova/virt/xenapi/client/session.py nova/virt/xenapi/image/__init__.py nova/virt/xenapi/image/bittorrent.py nova/virt/xenapi/image/glance.py nova/virt/xenapi/image/utils.py nova/virt/xenapi/image/vdi_through_dev.py nova/vnc/__init__.py nova/vnc/xvp_proxy.py nova/volume/__init__.py nova/volume/cinder.py nova/volume/encryptors/__init__.py nova/volume/encryptors/base.py nova/volume/encryptors/cryptsetup.py nova/volume/encryptors/luks.py nova/volume/encryptors/nop.py nova/wsgi/nova-api.py nova/wsgi/nova-metadata.py plugins/xenserver/doc/networking.rst plugins/xenserver/networking/etc/init.d/host-rules plugins/xenserver/networking/etc/init.d/openvswitch-nova plugins/xenserver/networking/etc/sysconfig/openvswitch-nova plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules plugins/xenserver/networking/etc/xensource/scripts/novalib.py plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py plugins/xenserver/xenapi/README plugins/xenserver/xenapi/etc/xapi.d/plugins/_bittorrent_seeder plugins/xenserver/xenapi/etc/xapi.d/plugins/agent plugins/xenserver/xenapi/etc/xapi.d/plugins/bandwidth plugins/xenserver/xenapi/etc/xapi.d/plugins/bittorrent plugins/xenserver/xenapi/etc/xapi.d/plugins/config_file plugins/xenserver/xenapi/etc/xapi.d/plugins/console plugins/xenserver/xenapi/etc/xapi.d/plugins/glance plugins/xenserver/xenapi/etc/xapi.d/plugins/ipxe plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel plugins/xenserver/xenapi/etc/xapi.d/plugins/migration plugins/xenserver/xenapi/etc/xapi.d/plugins/nova_plugin_version 
plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py releasenotes/notes/.placeholder releasenotes/notes/13.0.0-cve-bugs-fe43ef267a82f304.yaml releasenotes/notes/1516578-628b417b372f4f0f.yaml releasenotes/notes/abort-live-migration-cb902bb0754b11b6.yaml releasenotes/notes/add-aggregate-type-extra-specs-affinity-filter-79a2d3ee152b8ecd.yaml releasenotes/notes/add-novnc-proxy-config-to-vnc-group-f5bb68740f623744.yaml releasenotes/notes/add-xvp-config-to-vnc-group-349cca99f05fcfd3.yaml releasenotes/notes/aggregate-uuid-generation-1f029af7a9af519b.yaml releasenotes/notes/api-database-now-required-6245f39d36885d1c.yaml releasenotes/notes/api_servers_no_scheme-e4aa216d251022f2.yaml releasenotes/notes/apply-limits-to-qemu-img-8813f7a333ebdf69.yaml releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cfd49299f05.yaml releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml releasenotes/notes/bp-add-project-and-user-id-a560d087656157d4.yaml releasenotes/notes/bp-boot-from-uefi-b413b96017db76dd.yaml releasenotes/notes/bp-get-valid-server-state-a817488f4c8d3822.yaml releasenotes/notes/bp-instance-crash-dump-7ccbba7799dc66f9.yaml releasenotes/notes/bp-making-live-migration-api-friendly-3b547f4e0958ee05.yaml releasenotes/notes/bp-rbd-instance-snapshots-130e860b726ddc16.yaml releasenotes/notes/bp-split-network-plane-for-live-migration-40bc127734173759.yaml releasenotes/notes/bp-virt-driver-cpu-thread-pinning-1aaeeb6648f8e009.yaml releasenotes/notes/bug-1559026-47c3fa3468d66b07.yaml releasenotes/notes/bug-1662699-06203e7262e02aa6.yaml releasenotes/notes/bug-1673569-cve-2017-7214-2d7644b356015c93.yaml releasenotes/notes/cinder-backend-report-discard-1def1c28140def9b.yaml releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml releasenotes/notes/conductor_rpcapi_v2_drop-9893c27bb32d9786.yaml releasenotes/notes/config_scheduler_driver-e751ae392bc1a1d0.yaml releasenotes/notes/config_scheduler_host_manager_driver-a543a74ea70f5e90.yaml releasenotes/notes/deprecate-conductor-manager-class-03620676d939b0eb.yaml releasenotes/notes/deprecate-local-conductor-9cb9f45728281eb0.yaml releasenotes/notes/deprecate-nova-manage-service-subcommand-7626f7692bd62e41.yaml releasenotes/notes/deprecate_compute_stats_class-229abfcb8816bdbd.yaml releasenotes/notes/deprecate_db_driver-91c76ca8011d663c.yaml releasenotes/notes/deprecate_ert-449b16638c008457.yaml releasenotes/notes/deprecate_glance_opts-eab01aba5dcda38a.yaml releasenotes/notes/deprecate_hooks-6f6d60ac206a6da6.yaml releasenotes/notes/deprecate_pluggable_managers-ca0224bcd779454c.yaml releasenotes/notes/deprecate_security_group_api-3d96d683a3723e2c.yaml releasenotes/notes/deprecate_vendordata_driver-eefc745365a881c3.yaml releasenotes/notes/disable_ec2_api_by_default-0ec0946433fc7119.yaml releasenotes/notes/disco_volume_libvirt_driver-916428b8bd852732.yaml releasenotes/notes/disk-weight-scheduler-98647f9c6317d21d.yaml releasenotes/notes/disk_ratio_to_rt-b6224ab8c0272d86.yaml releasenotes/notes/drop_instancev1_obj-4447ddd2bea644fa.yaml releasenotes/notes/ebtables-version-fde659fe18b0e0c0.yaml releasenotes/notes/filters_use_reqspec-9f92b9c0ead76093.yaml releasenotes/notes/force-live-migration-be5a10cd9c8eb981.yaml releasenotes/notes/force_config_drive_opt-e087055e14c40d88.yaml 
releasenotes/notes/hyperv_2k8_drop-fb309f811767c7c4.yaml releasenotes/notes/instance-actions-read-deleted-instances-18bbb327924b66c7.yaml releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.yaml releasenotes/notes/ironic_api_version_opt_deprecated-50c9b0486e78fe6e.yaml releasenotes/notes/known-issue-on-api-1efca45440136f3e.yaml releasenotes/notes/libvirt-deprecate-migration-flags-config-4ba1e2d6c9ef09ff.yaml releasenotes/notes/libvirt-live-migration-flags-mangling-a2407a31ddf17427.yaml releasenotes/notes/libvirt-live-migration-new-tunneled-option-d7ebb1eb1e95e683.yaml releasenotes/notes/libvirt_hardware_policy_from_libosinfo-19e261851d1ad93a.yaml releasenotes/notes/list-server-bad-status-fix-7db504b38c8d732f.yaml releasenotes/notes/live_migration_uri-dependent-on-virt_type-595c46c2310f45c3.yaml releasenotes/notes/lock_policy-75bea372036acbd5.yaml releasenotes/notes/min_libvirt_bump-d9916d9c4512dd11.yaml releasenotes/notes/mitaka_prelude-c8b955ed78a5ad65.yaml releasenotes/notes/neutron-mtu-6a7edd9e396107d7.yaml releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml releasenotes/notes/new-oslo-reports-option-619c3dbf3ae320fb.yaml releasenotes/notes/online-data-migrations-48dde6a1d8661e47.yaml releasenotes/notes/optional_project_id-6aebf1cb394d498f.yaml releasenotes/notes/os-migrations-ef225e5b309d5497.yaml releasenotes/notes/parallels_support_snapshot-29b4ffae300c1f05.yaml releasenotes/notes/policy-sample-defaults-changed-b5eea1daeb305251.yaml releasenotes/notes/remove-deprecated-neutron-options-5f3a782aa9082fb5.yaml releasenotes/notes/remove-ec2-api-service-c17a35ed297355b8.yaml releasenotes/notes/remove-on-shared-storage-flag-from-evacuate-api-76a3d58616479fe9.yaml releasenotes/notes/remove_ec2_and_objectstore_api-4ccb539db1d171fa.yaml releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml releasenotes/notes/reserved-hugepages-per-nodes-f36225d5fca807e4.yaml releasenotes/notes/rm_volume_manager-78fed5be43d285b3.yaml releasenotes/notes/scheduling-to-disabled-hosts-79f5b5d20a42875a.yaml releasenotes/notes/server_migrations-30519b35d3ea6763.yaml releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml releasenotes/notes/soft-affinity-for-server-group-f45e191bd8cdbd15.yaml releasenotes/notes/switch-to-oslo-cache-7114a0ab2dea52df.yaml releasenotes/notes/upgrade_rootwrap_compute_filters-428ca239f2e4e63d.yaml releasenotes/notes/user-settable-server-description-89dcfc75677e31bc.yaml releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml releasenotes/notes/vhost-user-mtu-23d0af36a8adfa56.yaml releasenotes/notes/vmware_integration_bridge-249567087da5ecb2.yaml releasenotes/notes/vmware_limits-16edee7a9ad023bc.yaml releasenotes/notes/xen_rename-03edd9b78f3e81e5.yaml releasenotes/notes/xenserver-glance-plugin-1.3-11c3b70b8c928263.yaml releasenotes/notes/zookeeper-servicegroup-driver-removed-c3bcaa6f9fe976ed.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/abandon_old_reviews.sh tools/build_latex_pdf.sh tools/clean-vlans tools/colorizer.py tools/ebtables.workaround tools/enable-pre-commit-hook.sh tools/flake8wrap.sh tools/install_venv.py tools/install_venv_common.py tools/nova-manage.bash_completion tools/pretty_tox.sh tools/regression_tester.py tools/reserve-migrations.py tools/test-setup.sh tools/with_venv.sh tools/db/schema_diff.py 
tools/xenserver/cleanup_sm_locks.py tools/xenserver/destroy_cached_images.py tools/xenserver/populate_other_config.py tools/xenserver/rotate_xen_guest_logs.sh tools/xenserver/stress_test.py tools/xenserver/vdi_chain_cleanup.py tools/xenserver/vm_vdi_cleaner.pynova-13.1.4/nova.egg-info/dependency_links.txt0000664000567000056710000000000113064447467022426 0ustar jenkinsjenkins00000000000000 nova-13.1.4/nova.egg-info/pbr.json0000664000567000056710000000005613064447467020037 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "e193201"}nova-13.1.4/nova.egg-info/top_level.txt0000664000567000056710000000000513064447467021105 0ustar jenkinsjenkins00000000000000nova nova-13.1.4/nova.egg-info/entry_points.txt0000664000567000056710000002271413064447467021664 0ustar jenkinsjenkins00000000000000[console_scripts] nova-all = nova.cmd.all:main nova-api = nova.cmd.api:main nova-api-metadata = nova.cmd.api_metadata:main nova-api-os-compute = nova.cmd.api_os_compute:main nova-cells = nova.cmd.cells:main nova-cert = nova.cmd.cert:main nova-compute = nova.cmd.compute:main nova-conductor = nova.cmd.conductor:main nova-console = nova.cmd.console:main nova-consoleauth = nova.cmd.consoleauth:main nova-dhcpbridge = nova.cmd.dhcpbridge:main nova-idmapshift = nova.cmd.idmapshift:main nova-manage = nova.cmd.manage:main nova-network = nova.cmd.network:main nova-novncproxy = nova.cmd.novncproxy:main nova-rootwrap = oslo_rootwrap.cmd:main nova-rootwrap-daemon = oslo_rootwrap.cmd:daemon nova-scheduler = nova.cmd.scheduler:main nova-serialproxy = nova.cmd.serialproxy:main nova-spicehtml5proxy = nova.cmd.spicehtml5proxy:main nova-xvpvncproxy = nova.cmd.xvpvncproxy:main [nova.api.v21.extensions] access_ips = nova.api.openstack.compute.access_ips:AccessIPs admin_actions = nova.api.openstack.compute.admin_actions:AdminActions admin_password = nova.api.openstack.compute.admin_password:AdminPassword agents = nova.api.openstack.compute.agents:Agents aggregates = nova.api.openstack.compute.aggregates:Aggregates assisted_volume_snapshots = nova.api.openstack.compute.assisted_volume_snapshots:AssistedVolumeSnapshots attach_interfaces = nova.api.openstack.compute.attach_interfaces:AttachInterfaces availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone baremetal_nodes = nova.api.openstack.compute.baremetal_nodes:BareMetalNodes block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping cells = nova.api.openstack.compute.cells:Cells certificates = nova.api.openstack.compute.certificates:Certificates cloudpipe = nova.api.openstack.compute.cloudpipe:Cloudpipe config_drive = nova.api.openstack.compute.config_drive:ConfigDrive console_auth_tokens = nova.api.openstack.compute.console_auth_tokens:ConsoleAuthTokens console_output = nova.api.openstack.compute.console_output:ConsoleOutput consoles = nova.api.openstack.compute.consoles:Consoles create_backup = nova.api.openstack.compute.create_backup:CreateBackup deferred_delete = nova.api.openstack.compute.deferred_delete:DeferredDelete disk_config = nova.api.openstack.compute.disk_config:DiskConfig evacuate = nova.api.openstack.compute.evacuate:Evacuate extended_availability_zone = nova.api.openstack.compute.extended_availability_zone:ExtendedAvailabilityZone extended_server_attributes = nova.api.openstack.compute.extended_server_attributes:ExtendedServerAttributes extended_status = nova.api.openstack.compute.extended_status:ExtendedStatus extended_volumes = nova.api.openstack.compute.extended_volumes:ExtendedVolumes 
extension_info = nova.api.openstack.compute.extension_info:ExtensionInfo fixed_ips = nova.api.openstack.compute.fixed_ips:FixedIps flavor_access = nova.api.openstack.compute.flavor_access:FlavorAccess flavor_manage = nova.api.openstack.compute.flavor_manage:FlavorManage flavor_rxtx = nova.api.openstack.compute.flavor_rxtx:FlavorRxtx flavors = nova.api.openstack.compute.flavors:Flavors flavors_extraspecs = nova.api.openstack.compute.flavors_extraspecs:FlavorsExtraSpecs floating_ip_dns = nova.api.openstack.compute.floating_ip_dns:FloatingIpDns floating_ip_pools = nova.api.openstack.compute.floating_ip_pools:FloatingIpPools floating_ips = nova.api.openstack.compute.floating_ips:FloatingIps floating_ips_bulk = nova.api.openstack.compute.floating_ips_bulk:FloatingIpsBulk fping = nova.api.openstack.compute.fping:Fping hide_server_addresses = nova.api.openstack.compute.hide_server_addresses:HideServerAddresses hosts = nova.api.openstack.compute.hosts:Hosts hypervisors = nova.api.openstack.compute.hypervisors:Hypervisors image_metadata = nova.api.openstack.compute.image_metadata:ImageMetadata image_size = nova.api.openstack.compute.image_size:ImageSize images = nova.api.openstack.compute.images:Images instance_actions = nova.api.openstack.compute.instance_actions:InstanceActions instance_usage_audit_log = nova.api.openstack.compute.instance_usage_audit_log:InstanceUsageAuditLog ips = nova.api.openstack.compute.ips:IPs keypairs = nova.api.openstack.compute.keypairs:Keypairs limits = nova.api.openstack.compute.limits:Limits lock_server = nova.api.openstack.compute.lock_server:LockServer migrate_server = nova.api.openstack.compute.migrate_server:MigrateServer migrations = nova.api.openstack.compute.migrations:Migrations multinic = nova.api.openstack.compute.multinic:Multinic multiple_create = nova.api.openstack.compute.multiple_create:MultipleCreate networks = nova.api.openstack.compute.networks:Networks networks_associate = nova.api.openstack.compute.networks_associate:NetworksAssociate pause_server = nova.api.openstack.compute.pause_server:PauseServer personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild quota_classes = nova.api.openstack.compute.quota_classes:QuotaClasses quota_sets = nova.api.openstack.compute.quota_sets:QuotaSets remote_consoles = nova.api.openstack.compute.remote_consoles:RemoteConsoles rescue = nova.api.openstack.compute.rescue:Rescue scheduler_hints = nova.api.openstack.compute.scheduler_hints:SchedulerHints security_group_default_rules = nova.api.openstack.compute.security_group_default_rules:SecurityGroupDefaultRules security_groups = nova.api.openstack.compute.security_groups:SecurityGroups server_diagnostics = nova.api.openstack.compute.server_diagnostics:ServerDiagnostics server_external_events = nova.api.openstack.compute.server_external_events:ServerExternalEvents server_groups = nova.api.openstack.compute.server_groups:ServerGroups server_metadata = nova.api.openstack.compute.server_metadata:ServerMetadata server_migrations = nova.api.openstack.compute.server_migrations:ServerMigrations server_password = nova.api.openstack.compute.server_password:ServerPassword server_usage = nova.api.openstack.compute.server_usage:ServerUsage servers = nova.api.openstack.compute.servers:Servers services = nova.api.openstack.compute.services:Services shelve = nova.api.openstack.compute.shelve:Shelve simple_tenant_usage = 
nova.api.openstack.compute.simple_tenant_usage:SimpleTenantUsage suspend_server = nova.api.openstack.compute.suspend_server:SuspendServer tenant_networks = nova.api.openstack.compute.tenant_networks:TenantNetworks used_limits = nova.api.openstack.compute.used_limits:UsedLimits user_data = nova.api.openstack.compute.user_data:UserData versions = nova.api.openstack.compute.versionsV21:Versions virtual_interfaces = nova.api.openstack.compute.virtual_interfaces:VirtualInterfaces volumes = nova.api.openstack.compute.volumes:Volumes [nova.api.v21.extensions.server.create] access_ips = nova.api.openstack.compute.access_ips:AccessIPs availability_zone = nova.api.openstack.compute.availability_zone:AvailabilityZone block_device_mapping = nova.api.openstack.compute.block_device_mapping:BlockDeviceMapping block_device_mapping_v1 = nova.api.openstack.compute.block_device_mapping_v1:BlockDeviceMappingV1 config_drive = nova.api.openstack.compute.config_drive:ConfigDrive disk_config = nova.api.openstack.compute.disk_config:DiskConfig keypairs_create = nova.api.openstack.compute.keypairs:Keypairs multiple_create = nova.api.openstack.compute.multiple_create:MultipleCreate personality = nova.api.openstack.compute.personality:Personality scheduler_hints = nova.api.openstack.compute.scheduler_hints:SchedulerHints security_groups = nova.api.openstack.compute.security_groups:SecurityGroups user_data = nova.api.openstack.compute.user_data:UserData [nova.api.v21.extensions.server.rebuild] access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig personality = nova.api.openstack.compute.personality:Personality preserve_ephemeral_rebuild = nova.api.openstack.compute.preserve_ephemeral_rebuild:PreserveEphemeralRebuild [nova.api.v21.extensions.server.resize] disk_config = nova.api.openstack.compute.disk_config:DiskConfig [nova.api.v21.extensions.server.update] access_ips = nova.api.openstack.compute.access_ips:AccessIPs disk_config = nova.api.openstack.compute.disk_config:DiskConfig [nova.api.v21.test_extensions] basic = nova.tests.unit.api.openstack.compute.basic:Basic microversions = nova.tests.unit.api.openstack.compute.microversions:Microversions [nova.compute.monitors.cpu] virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor [nova.compute.resources] [nova.image.download.modules] file = nova.image.download.file [nova.ipv6_backend] account_identifier = nova.ipv6.account_identifier rfc2462 = nova.ipv6.rfc2462 [nova.scheduler.driver] caching_scheduler = nova.scheduler.caching_scheduler:CachingScheduler chance_scheduler = nova.scheduler.chance:ChanceScheduler fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler [nova.scheduler.host_manager] host_manager = nova.scheduler.host_manager:HostManager ironic_host_manager = nova.scheduler.ironic_host_manager:IronicHostManager [oslo.config.opts] nova = nova.opts:list_opts nova.api = nova.api.opts:list_opts nova.cache_utils = nova.cache_utils:list_opts nova.cells = nova.cells.opts:list_opts nova.compute = nova.compute.opts:list_opts nova.conf = nova.conf.opts:list_opts nova.network = nova.network.opts:list_opts nova.network.neutronv2 = nova.network.neutronv2.api:list_opts nova.virt = nova.virt.opts:list_opts [oslo.config.opts.defaults] nova.api = nova.common.config:set_middleware_defaults nova-13.1.4/nova.egg-info/PKG-INFO0000664000567000056710000000632113064447467017457 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 
Name: nova Version: 13.1.4 Summary: Cloud computing fabric controller Home-page: http://docs.openstack.org/developer/nova/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: OpenStack Nova README ===================== OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs. OpenStack Nova is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Nova primarily consists of a set of Python daemons, though it requires and integrates with a number of native system components for databases, messaging and virtualization capabilities. To keep updated with new developments in the OpenStack project follow `@openstack `_ on Twitter. To learn how to deploy OpenStack Nova, consult the documentation available online at: http://docs.openstack.org For information about the different compute (hypervisor) drivers supported by Nova, read this page on the wiki: https://wiki.openstack.org/wiki/HypervisorSupportMatrix In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. If you obtained the software from a 3rd party operating system vendor, it is often wise to use their own bug tracker for reporting problems. In all other cases use the master OpenStack bug tracker, available at: http://bugs.launchpad.net/nova Developers wishing to work on the OpenStack Nova project should always base their work on the latest Nova code, available from the master GIT repository at: https://git.openstack.org/cgit/openstack/nova Developers should also join the discussion on the mailing list, at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. Further developer focused documentation is available at: http://docs.openstack.org/developer/nova/ For information on how to contribute to Nova, please see the contents of the CONTRIBUTING.rst file. 
-- End of broadcast Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 nova-13.1.4/nova.egg-info/not-zip-safe0000664000567000056710000000000113064447415020577 0ustar jenkinsjenkins00000000000000 nova-13.1.4/nova.egg-info/requires.txt0000664000567000056710000000237213064447467020764 0ustar jenkinsjenkins00000000000000pbr>=1.6 SQLAlchemy<1.1.0,>=1.0.10 boto>=2.32.1 decorator>=3.4.0 eventlet!=0.18.3,>=0.18.2 Jinja2>=2.8 keystonemiddleware!=4.1.0,>=4.0.0 lxml>=2.3 cryptography!=1.3.0,>=1.0 pycrypto>=2.6 WebOb>=1.2.3 greenlet>=0.3.2 PasteDeploy>=1.5.0 Paste PrettyTable<0.8,>=0.7 sqlalchemy-migrate>=0.9.6 netaddr!=0.7.16,>=0.7.12 netifaces>=0.10.4 paramiko>=1.16.0 Babel!=2.3.0,!=2.3.1,!=2.3.2,!=2.3.3,>=1.3 iso8601>=0.1.9 jsonschema!=2.5.0,<3.0.0,>=2.0.0 python-cinderclient>=1.3.1 keystoneauth1>=2.1.0 python-neutronclient!=4.1.0,>=2.6.0 python-glanceclient>=2.0.0 requests!=2.9.0,>=2.8.1 six>=1.9.0 stevedore>=1.5.0 setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,>=16.0 websockify>=0.6.1 oslo.cache>=1.5.0 oslo.concurrency>=3.7.1 oslo.config>=3.7.0 oslo.context>=0.2.0 oslo.log>=1.14.0 oslo.reports>=0.6.0 oslo.serialization>=1.10.0 oslo.utils>=3.5.0 oslo.db>=4.1.0 oslo.rootwrap>=2.0.0 oslo.messaging>=4.0.0 oslo.policy>=0.5.0 oslo.i18n>=2.1.0 oslo.service>=1.0.0 rfc3986>=0.2.0 oslo.middleware>=3.0.0 psutil<2.0.0,>=1.1.1 oslo.versionedobjects>=1.5.0 alembic>=0.8.0 os-brick!=1.4.0,>=1.0.0 os-win>=0.2.3 castellan>=0.3.1 [:(python_version!='2.7')] Routes!=2.0,!=2.3.0,>=1.12.3 [:(python_version=='2.7')] Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3 nova-13.1.4/nova/0000775000567000056710000000000013064447471014661 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/0000775000567000056710000000000013064447471016023 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/fixtures.py0000664000567000056710000005130713064447152020250 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Fixtures for Nova tests.""" from __future__ import absolute_import import logging as std_logging import os import warnings import fixtures from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from oslo_messaging import conffixture as messaging_conffixture import six from nova.db import migration from nova.db.sqlalchemy import api as session from nova import exception from nova.objects import base as obj_base from nova import rpc from nova import service from nova.tests.functional.api import client _TRUE_VALUES = ('True', 'true', '1', 'yes') CONF = cfg.CONF DB_SCHEMA = {'main': "", 'api': ""} SESSION_CONFIGURED = False class ServiceFixture(fixtures.Fixture): """Run a service as a test fixture.""" def __init__(self, name, host=None, **kwargs): name = name # If not otherwise specified, the host will default to the # name of the service. Some things like aggregates care that # this is stable. host = host or name kwargs.setdefault('host', host) kwargs.setdefault('binary', 'nova-%s' % name) self.kwargs = kwargs def setUp(self): super(ServiceFixture, self).setUp() self.service = service.Service.create(**self.kwargs) self.service.start() self.addCleanup(self.service.kill) class NullHandler(std_logging.Handler): """custom default NullHandler to attempt to format the record. Used in conjunction with log_fixture.get_logging_handle_error_fixture to detect formatting errors in debug level logs without saving the logs. """ def handle(self, record): self.format(record) def emit(self, record): pass def createLock(self): self.lock = None class StandardLogging(fixtures.Fixture): """Setup Logging redirection for tests. There are a number of things we want to handle with logging in tests: * Redirect the logging to somewhere that we can test or dump it later. * Ensure that as many DEBUG messages as possible are actually executed, to ensure they are actually syntactically valid (they often have not been). * Ensure that we create useful output for tests that doesn't overwhelm the testing system (which means we can't capture the 100 MB of debug logging on every run). To do this we create a logger fixture at the root level, which defaults to INFO and create a Null Logger at DEBUG which lets us execute log messages at DEBUG but not keep the output. To support local debugging OS_DEBUG=True can be set in the environment, which will print out the full debug logging. There are also a set of overrides for particularly verbose modules to be even less than INFO. """ def setUp(self): super(StandardLogging, self).setUp() # set root logger to debug root = std_logging.getLogger() root.setLevel(std_logging.DEBUG) # supports collecting debug level for local runs if os.environ.get('OS_DEBUG') in _TRUE_VALUES: level = std_logging.DEBUG else: level = std_logging.INFO # Collect logs fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s' self.logger = self.useFixture( fixtures.FakeLogger(format=fs, level=None)) # TODO(sdague): why can't we send level through the fake # logger? Tests prove that it breaks, but it's worth getting # to the bottom of. root.handlers[0].setLevel(level) if level > std_logging.DEBUG: # Just attempt to format debug level logs, but don't save them handler = NullHandler() self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False)) handler.setLevel(std_logging.DEBUG) # Don't log every single DB migration step std_logging.getLogger( 'migrate.versioning.api').setLevel(std_logging.WARNING) class OutputStreamCapture(fixtures.Fixture): """Capture output streams during tests. 
This fixture captures errant printing to stderr / stdout during the tests and lets us see those streams at the end of the test runs instead. Useful to see what was happening during failed tests. """ def setUp(self): super(OutputStreamCapture, self).setUp() if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: self.out = self.useFixture(fixtures.StringStream('stdout')) self.useFixture( fixtures.MonkeyPatch('sys.stdout', self.out.stream)) if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: self.err = self.useFixture(fixtures.StringStream('stderr')) self.useFixture( fixtures.MonkeyPatch('sys.stderr', self.err.stream)) @property def stderr(self): return self.err._details["stderr"].as_text() @property def stdout(self): return self.out._details["stdout"].as_text() class Timeout(fixtures.Fixture): """Setup per test timeouts. In order to avoid test deadlocks we support setting up a test timeout parameter read from the environment. In almost all cases where the timeout is reached this means a deadlock. A class level TIMEOUT_SCALING_FACTOR also exists, which allows extremely long tests to specify they need more time. """ def __init__(self, timeout, scaling=1): super(Timeout, self).__init__() try: self.test_timeout = int(timeout) except ValueError: # If timeout value is invalid do not set a timeout. self.test_timeout = 0 if scaling >= 1: self.test_timeout *= scaling else: raise ValueError('scaling value must be >= 1') def setUp(self): super(Timeout, self).setUp() if self.test_timeout > 0: self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True)) class Database(fixtures.Fixture): def __init__(self, database='main', connection=None): """Create a database fixture. :param database: The type of database, 'main' or 'api' :param connection: The connection string to use """ super(Database, self).__init__() # NOTE(pkholkin): oslo_db.enginefacade is configured in tests the same # way as it is done for any other service that uses db global SESSION_CONFIGURED if not SESSION_CONFIGURED: session.configure(CONF) SESSION_CONFIGURED = True self.database = database if database == 'main': if connection is not None: ctxt_mgr = session.create_context_manager( connection=connection) facade = ctxt_mgr.get_legacy_facade() self.get_engine = facade.get_engine else: self.get_engine = session.get_engine elif database == 'api': self.get_engine = session.get_api_engine def _cache_schema(self): global DB_SCHEMA if not DB_SCHEMA[self.database]: engine = self.get_engine() conn = engine.connect() migration.db_sync(database=self.database) DB_SCHEMA[self.database] = "".join(line for line in conn.connection.iterdump()) engine.dispose() def cleanup(self): engine = self.get_engine() engine.dispose() def reset(self): self._cache_schema() engine = self.get_engine() engine.dispose() conn = engine.connect() conn.connection.executescript(DB_SCHEMA[self.database]) def setUp(self): super(Database, self).setUp() self.reset() self.addCleanup(self.cleanup) class DatabaseAtVersion(fixtures.Fixture): def __init__(self, version, database='main'): """Create a database fixture. 
:param version: Max version to sync to (or None for current) :param database: The type of database, 'main' or 'api' """ super(DatabaseAtVersion, self).__init__() self.database = database self.version = version if database == 'main': self.get_engine = session.get_engine elif database == 'api': self.get_engine = session.get_api_engine def cleanup(self): engine = self.get_engine() engine.dispose() def reset(self): engine = self.get_engine() engine.dispose() engine.connect() migration.db_sync(version=self.version, database=self.database) def setUp(self): super(DatabaseAtVersion, self).setUp() self.reset() self.addCleanup(self.cleanup) class RPCFixture(fixtures.Fixture): def __init__(self, *exmods): super(RPCFixture, self).__init__() self.exmods = [] self.exmods.extend(exmods) def setUp(self): super(RPCFixture, self).setUp() self.addCleanup(rpc.cleanup) rpc.add_extra_exmods(*self.exmods) self.addCleanup(rpc.clear_extra_exmods) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' self.useFixture(self.messaging_conf) rpc.init(CONF) class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super(WarningsFixture, self).setUp() # NOTE(sdague): Make deprecation warnings only happen once. Otherwise # this gets kind of crazy given the way that upstream python libs use # this. warnings.simplefilter("once", DeprecationWarning) warnings.filterwarnings('ignore', message='With-statements now directly support' ' multiple context managers') self.addCleanup(warnings.resetwarnings) class ConfPatcher(fixtures.Fixture): """Fixture to patch and restore global CONF. This also resets overrides for everything that is patched during it's teardown. """ def __init__(self, **kwargs): """Constructor :params group: if specified all config options apply to that group. :params **kwargs: the rest of the kwargs are processed as a set of key/value pairs to be set as configuration override. """ super(ConfPatcher, self).__init__() self.group = kwargs.pop('group', None) self.args = kwargs def setUp(self): super(ConfPatcher, self).setUp() for k, v in six.iteritems(self.args): self.addCleanup(CONF.clear_override, k, self.group) CONF.set_override(k, v, self.group) class OSAPIFixture(fixtures.Fixture): """Create an OS API server as a fixture. This spawns an OS API server as a fixture in a new greenthread in the current test. The fixture has a .api paramenter with is a simple rest client that can communicate with it. This fixture is extremely useful for testing REST responses through the WSGI stack easily in functional tests. Usage: api = self.useFixture(fixtures.OSAPIFixture()).api resp = api.api_request('/someurl') self.assertEqual(200, resp.status_code) resp = api.api_request('/otherurl', method='POST', body='{foo}') The resp is a requests library response. Common attributes that you'll want to use are: - resp.status_code - integer HTTP status code returned by the request - resp.content - the body of the response - resp.headers - dictionary of HTTP headers returned """ def __init__(self, api_version='v2', project_id='6f70656e737461636b20342065766572'): """Constructor :param api_version: the API version that we're interested in using. Currently this expects 'v2' or 'v2.1' as possible options. :param project_id: the project id to use on the API. 
""" super(OSAPIFixture, self).__init__() self.api_version = api_version self.project_id = project_id def setUp(self): super(OSAPIFixture, self).setUp() # in order to run these in tests we need to bind only to local # host, and dynamically allocate ports conf_overrides = { 'osapi_compute_listen': '127.0.0.1', 'metadata_listen': '127.0.0.1', 'osapi_compute_listen_port': 0, 'metadata_listen_port': 0, 'verbose': True, 'debug': True } self.useFixture(ConfPatcher(**conf_overrides)) self.osapi = service.WSGIService("osapi_compute") self.osapi.start() self.addCleanup(self.osapi.stop) self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({ 'host': self.osapi.host, 'port': self.osapi.port, 'api_version': self.api_version}) self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url, self.project_id) self.admin_api = client.TestOpenStackClient( 'admin', 'admin', self.auth_url, self.project_id) class PoisonFunctions(fixtures.Fixture): """Poison functions so they explode if we touch them. When running under a non full stack test harness there are parts of the code that you don't want to go anywhere near. These include things like code that spins up extra threads, which just introduces races. """ def setUp(self): super(PoisonFunctions, self).setUp() # The nova libvirt driver starts an event thread which only # causes trouble in tests. Make sure that if tests don't # properly patch it the test explodes. # explicit import because MonkeyPatch doesn't magic import # correctly if we are patching a method on a class in a # module. import nova.virt.libvirt.host # noqa def evloop(*args, **kwargs): import sys warnings.warn("Forgot to disable libvirt event thread") sys.exit(1) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.Host._init_events', evloop)) class IndirectionAPIFixture(fixtures.Fixture): """Patch and restore the global NovaObject indirection api.""" def __init__(self, indirection_api): """Constructor :param indirection_api: the indirection API to be used for tests. """ super(IndirectionAPIFixture, self).__init__() self.indirection_api = indirection_api def cleanup(self): obj_base.NovaObject.indirection_api = self.orig_indirection_api def setUp(self): super(IndirectionAPIFixture, self).setUp() self.orig_indirection_api = obj_base.NovaObject.indirection_api obj_base.NovaObject.indirection_api = self.indirection_api self.addCleanup(self.cleanup) class _FakeGreenThread(object): def __init__(self, func, *args, **kwargs): self._result = func(*args, **kwargs) def cancel(self, *args, **kwargs): # This method doesn't make sense for a synchronous call, it's just # defined to satisfy the interface. pass def kill(self, *args, **kwargs): # This method doesn't make sense for a synchronous call, it's just # defined to satisfy the interface. pass def link(self, func, *args, **kwargs): func(self, *args, **kwargs) def unlink(self, func, *args, **kwargs): # This method doesn't make sense for a synchronous call, it's just # defined to satisfy the interface. 
pass def wait(self): return self._result class SpawnIsSynchronousFixture(fixtures.Fixture): """Patch and restore the spawn_n utility method to be synchronous""" def setUp(self): super(SpawnIsSynchronousFixture, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'nova.utils.spawn_n', _FakeGreenThread)) self.useFixture(fixtures.MonkeyPatch( 'nova.utils.spawn', _FakeGreenThread)) class BannedDBSchemaOperations(fixtures.Fixture): """Ban some operations for migrations""" def __init__(self, banned_resources=None): super(BannedDBSchemaOperations, self).__init__() self._banned_resources = banned_resources or [] @staticmethod def _explode(resource, op): raise exception.DBNotAllowed( 'Operation %s.%s() is not allowed in a database migration' % ( resource, op)) def setUp(self): super(BannedDBSchemaOperations, self).setUp() for thing in self._banned_resources: self.useFixture(fixtures.MonkeyPatch( 'sqlalchemy.%s.drop' % thing, lambda *a, **k: self._explode(thing, 'drop'))) self.useFixture(fixtures.MonkeyPatch( 'sqlalchemy.%s.alter' % thing, lambda *a, **k: self._explode(thing, 'alter'))) class StableObjectJsonFixture(fixtures.Fixture): """Fixture that makes sure we get stable JSON object representations. Since objects contain things like set(), which can't be converted to JSON, we have some situations where the representation isn't fully deterministic. This doesn't matter at all at runtime, but does to unit tests that try to assert things at a low level. This fixture mocks the obj_to_primitive() call and makes sure to sort the list of changed fields (which came from a set) before returning it to the caller. """ def __init__(self): self._original_otp = obj_base.NovaObject.obj_to_primitive def setUp(self): super(StableObjectJsonFixture, self).setUp() def _doit(obj, *args, **kwargs): result = self._original_otp(obj, *args, **kwargs) if 'nova_object.changes' in result: result['nova_object.changes'].sort() return result self.useFixture(fixtures.MonkeyPatch( 'nova.objects.base.NovaObject.obj_to_primitive', _doit)) class EngineFacadeFixture(fixtures.Fixture): """Fixture to isolation EngineFacade during tests. Because many elements of EngineFacade are based on globals, once an engine facade has been initialized, all future code goes through it. This means that the initialization of sqlite in databases in our Database fixture will drive all connections to sqlite. While that's fine in a production environment, during testing this means we can't test againts multiple backends in the same test run. oslo.db does not yet support a reset mechanism here. This builds a custom in tree engine facade fixture to handle this. Eventually this will be added to oslo.db and this can be removed. 
Tracked by https://bugs.launchpad.net/oslo.db/+bug/1548960 """ def __init__(self, ctx_manager, engine, sessionmaker): super(EngineFacadeFixture, self).__init__() self._ctx_manager = ctx_manager self._engine = engine self._sessionmaker = sessionmaker def setUp(self): super(EngineFacadeFixture, self).setUp() self._existing_factory = self._ctx_manager._root_factory self._ctx_manager._root_factory = enginefacade._TestTransactionFactory( self._engine, self._sessionmaker, apply_global=False, synchronous_reader=True) self.addCleanup(self.cleanup) def cleanup(self): self._ctx_manager._root_factory = self._existing_factory class ForbidNewLegacyNotificationFixture(fixtures.Fixture): """Make sure the test fails if new legacy notification is added""" def __init__(self): super(ForbidNewLegacyNotificationFixture, self).__init__() self.notifier = rpc.LegacyValidatingNotifier def setUp(self): super(ForbidNewLegacyNotificationFixture, self).setUp() self.notifier.fatal = True # allow the special test value used in # nova.tests.unit.test_notifications.NotificationsTestCase self.notifier.allowed_legacy_notification_event_types.append( '_decorated_function') self.addCleanup(self.cleanup) def cleanup(self): self.notifier.fatal = False self.notifier.allowed_legacy_notification_event_types.remove( '_decorated_function') nova-13.1.4/nova/tests/__init__.py0000664000567000056710000000000013064447140020113 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/uuidsentinel.py0000664000567000056710000000202513064447152021100 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
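# ---------------------------------------------------------------------------
# Illustrative sketch only -- not part of this module.  It shows how a
# testtools-based test case might compose several of the fixtures defined in
# nova/tests/fixtures.py above.  The class name, test name and the
# 'example-host' override value are assumptions made for the example; in-tree
# tests normally inherit this wiring from nova.test.TestCase instead.
def _example_fixture_usage():
    """Return an example TestCase wired up with common Nova test fixtures."""
    # Imports are local so this sketch has no import-time side effects.
    import testtools

    from nova import conf
    from nova.tests import fixtures as nova_fixtures

    class ExampleFixtureTest(testtools.TestCase):
        def setUp(self):
            super(ExampleFixtureTest, self).setUp()
            # Redirect logging so DEBUG lines are still formatted/validated.
            self.useFixture(nova_fixtures.StandardLogging())
            # Fail the test instead of hanging forever on a deadlock.
            self.useFixture(nova_fixtures.Timeout(60, scaling=1))
            # Override a config option for this test; reset again on cleanup.
            self.useFixture(nova_fixtures.ConfPatcher(host='example-host'))

        def test_override_applied(self):
            self.assertEqual('example-host', conf.CONF.host)

    return ExampleFixtureTest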
import sys class UUIDSentinels(object): def __init__(self): from oslo_utils import uuidutils self._uuid_module = uuidutils self._sentinels = {} def __getattr__(self, name): if name.startswith('_'): raise ValueError('Sentinels must not start with _') if name not in self._sentinels: self._sentinels[name] = str(self._uuid_module.generate_uuid()) return self._sentinels[name] sys.modules[__name__] = UUIDSentinels() nova-13.1.4/nova/tests/live_migration/0000775000567000056710000000000013064447471021033 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/live_migration/hooks/0000775000567000056710000000000013064447471022156 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/live_migration/hooks/ceph.sh0000775000567000056710000004031113064447152023427 0ustar jenkinsjenkins00000000000000#!/bin/bash CEPH_REPLICAS=2 function setup_ceph_cluster { install_ceph_full configure_ceph_local echo "copy ceph.conf and admin keyring to compute only nodes" ls -la /etc/ceph sudo cp /etc/ceph/ceph.conf /tmp/ceph.conf sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.conf $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.conf dest=/etc/ceph/ceph.conf owner=root group=root" sudo rm -f /tmp/ceph.conf sudo cp /etc/ceph/ceph.client.admin.keyring /tmp/ceph.client.admin.keyring sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.admin.keyring sudo chmod 644 /tmp/ceph.client.admin.keyring ls -la /tmp $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.admin.keyring dest=/etc/ceph/ceph.client.admin.keyring owner=root group=root" sudo rm -f /tmp/ceph.client.admin.keyring echo "check result of copying files" $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ls -la /etc/ceph" echo "start ceph-mon" sudo initctl emit ceph-mon id=$(hostname) echo "start ceph-osd" sudo start ceph-osd id=${OSD_ID} echo "check ceph-osd before second node addition" wait_for_ceph_up configure_ceph_remote echo "check ceph-osd tree" wait_for_ceph_up } function install_ceph_full { if uses_debs; then $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m apt \ -a "name=ceph state=present" elif is_fedora; then $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m yum \ -a "name=ceph state=present" fi } function configure_ceph_local { sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp} # create ceph monitor initial key and directory sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \ --create-keyring --name=mon. 
--add-key=$(ceph-authtool --gen-print-key) \ --cap mon 'allow *' sudo mkdir /var/lib/ceph/mon/ceph-$(hostname) # create a default ceph configuration file sudo tee ${CEPH_CONF_FILE} > /dev/null < /dev/null sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring #copy cinder keyring to compute only node sudo cp /etc/ceph/ceph.client.cinder.keyring /tmp/ceph.client.cinder.keyring sudo chown stack:stack /tmp/ceph.client.cinder.keyring $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.cinder.keyring dest=/etc/ceph/ceph.client.cinder.keyring" sudo rm -f /tmp/ceph.client.cinder.keyring sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS} if [[ $CEPH_REPLICAS -ne 1 ]]; then sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID} fi } function configure_and_start_nova { _ceph_configure_nova #import secret to libvirt _populate_libvirt_secret echo 'check compute processes before restart' $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" #stop nova-compute stop 'all' 'n-cpu' echo 'check processes after compute stop' $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" # restart local nova-compute sudo -H -u $STACK_USER bash -c "/tmp/start_process.sh n-cpu '/usr/local/bin/nova-compute --config-file /etc/nova/nova.conf' libvirtd" # restart remote nova-compute for SUBNODE in $SUBNODES ; do ssh $SUBNODE "sudo -H -u $STACK_USER bash -c '/tmp/start_process.sh n-cpu \"/usr/local/bin/nova-compute --config-file /etc/nova/nova.conf\" libvirtd'" done $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" } function _ceph_configure_cinder { sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP} sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS} if [[ $CEPH_REPLICAS -ne 1 ]]; then sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID} fi CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf} $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_backend_name value=ceph" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_driver value=cinder.volume.drivers.rbd.RBDDriver" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_ceph_conf value=$CEPH_CONF_FILE" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_pool value=$CINDER_CEPH_POOL" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_user value=$CINDER_CEPH_USER" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_uuid value=$CINDER_CEPH_UUID" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_flatten_volume_from_snapshot value=False" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_max_clone_depth value=5" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=glance_api_version value=2" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file 
-a "dest=$CINDER_CONF section=DEFAULT option=default_volume_type value=ceph" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=enabled_backends value=ceph" } function configure_and_start_cinder { _ceph_configure_cinder stop 'primary' 'c-vol' sudo -H -u $STACK_USER bash -c "/tmp/start_process.sh c-vol '/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf'" source $BASE/new/devstack/openrc export OS_USERNAME=admin export OS_PROJECT_NAME=admin lvm_type=$(cinder type-list | awk -F "|" 'NR==4{ print $2}') cinder type-delete $lvm_type openstack volume type create --os-volume-api-version 1 --property volume_backend_name="ceph" ceph } function _populate_libvirt_secret { cat > /tmp/secret.xml < ${CINDER_CEPH_UUID} client.${CINDER_CEPH_USER} secret EOF $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/secret.xml dest=/tmp/secret.xml" $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-define --file /tmp/secret.xml" local secret=$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER}) # TODO(tdurakov): remove this escaping as https://github.com/ansible/ansible/issues/13862 fixed secret=${secret//=/'\='} $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $secret" $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/tmp/secret.xml state=absent" } nova-13.1.4/nova/tests/live_migration/hooks/run_tests.sh0000775000567000056710000000311413064447152024536 0ustar jenkinsjenkins00000000000000#!/bin/bash # Live migration dedicated ci job will be responsible for testing different # environments based on underlying storage, used for ephemerals. # This hook allows to inject logic of environment reconfiguration in ci job. # Base scenario for this would be: # # 1. test with all local storage (use default for volumes) # 2. test with NFS for root + ephemeral disks # 3. test with Ceph for root + ephemeral disks # 4. test with Ceph for volumes and root + ephemeral disk set -xe cd $BASE/new/tempest source $BASE/new/devstack/functions source $BASE/new/devstack/functions-common source $WORKSPACE/devstack-gate/functions.sh source $BASE/new/nova/nova/tests/live_migration/hooks/utils.sh source $BASE/new/nova/nova/tests/live_migration/hooks/nfs.sh source $BASE/new/nova/nova/tests/live_migration/hooks/ceph.sh primary_node=$(cat /etc/nodepool/primary_node_private) SUBNODES=$(cat /etc/nodepool/sub_nodes_private) SERVICE_HOST=$primary_node STACK_USER=${STACK_USER:-stack} populate_start_script echo '1. test with all local storage (use default for volumes)' run_tempest "block migration test" echo '2. test with NFS for root + ephemeral disks' nfs_setup nfs_configure_tempest nfs_verify_setup run_tempest "NFS shared storage test" nfs_teardown echo '3. test with Ceph for root + ephemeral disks' source $BASE/new/devstack/lib/ceph #reset output set -xe setup_ceph_cluster configure_and_start_glance configure_and_start_nova run_tempest "Ceph nova&glance test" echo '4. test with Ceph for volumes and root + ephemeral disk' configure_and_start_cinder run_tempest "Ceph nova&glance&cinder test"nova-13.1.4/nova/tests/live_migration/hooks/utils.sh0000775000567000056710000000417013064447152023653 0ustar jenkinsjenkins00000000000000#!/bin/bash function run_tempest { local message=$1 sudo -H -u tempest tox -eall -- --concurrency=$TEMPEST_CONCURRENCY live_migration exitcode=$? 
if [[ $exitcode -ne 0 ]]; then die $LINENO "$message failure" fi } function populate_start_script { SCREEN_NAME=${SCREEN_NAME:-stack} DEST=${DEST:-/opt/stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} ENABLED_SERVICES=${ENABLED_SERVICES:-n-cpu,g-api,c-vol} LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirtd} TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} LOGDAYS=${LOGDAYS:-7} CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") #creates script for starting process without screen and copies it to all # nodes # # args: # $1 - service name to start # $2 - command to execute # $3 - group to run under cat > /tmp/start_process.sh <&"\$REAL_LOG_FILE" 2>&1 ln -sf "\$REAL_LOG_FILE" \$LOGDIR/\$service.log export PYTHONUNBUFFERED=1 fi if [[ -n "\$sg" ]]; then setsid sg \$sg -c "\$command" & echo \$! >\$SERVICE_DIR/\$SCREEN_NAME/\$service.pid else setsid \$command & echo \$! >\$SERVICE_DIR/\$SCREEN_NAME/\$service.pid fi exit 0 EOF chmod +x /tmp/start_process.sh $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/start_process.sh dest=/tmp/start_process.sh owner=$STACK_USER group=$STACK_USER mode=0777" $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ls -la /tmp/start_process.sh" } function stop { local target=$1 local service=$2 $ANSIBLE $target --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a " executable=/bin/bash BASE\=$BASE source $BASE/new/devstack/functions-common ENABLED_SERVICES\=$ENABLED_SERVICES SCREEN_NAME\=$SCREEN_NAME SERVICE_DIR\=$SERVICE_DIR stop_process $service " } nova-13.1.4/nova/tests/live_migration/hooks/nfs.sh0000775000567000056710000000515413064447140023301 0ustar jenkinsjenkins00000000000000#!/bin/bash function nfs_setup { if uses_debs; then module=apt elif is_fedora; then module=yum fi $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m $module \ -a "name=nfs-common state=present" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m $module \ -a "name=nfs-kernel-server state=present" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-User value=nova" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-Group value=nova" for SUBNODE in $SUBNODES ; do $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m lineinfile -a "dest=/etc/exports line='/opt/stack/data/nova/instances $SUBNODE(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)'" done $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "exportfs -a" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=restarted" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=idmapd state=restarted" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 111 -j ACCEPT" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 111 -j ACCEPT" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 2049 -j ACCEPT" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 2049 -j ACCEPT" $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "mount -t nfs4 -o proto\=tcp,port\=2049 $primary_node:/ /opt/stack/data/nova/instances/" } function nfs_configure_tempest { $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf 
section=compute-feature-enabled option=block_migration_for_live_migration value=False" } function nfs_verify_setup { $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/opt/stack/data/nova/instances/test_file state=touch" if [ ! -e '/opt/stack/data/nova/instances/test_file' ]; then die $LINENO "NFS configuration failure" fi } function nfs_teardown { #teardown nfs shared storage $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "umount -t nfs4 /opt/stack/data/nova/instances/" $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=stopped" }nova-13.1.4/nova/tests/functional/0000775000567000056710000000000013064447471020165 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/integrated_helpers.py0000664000567000056710000001762713064447152024420 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Provides common functionality for integrated unit tests """ import random import string import uuid from oslo_log import log as logging import nova.conf import nova.image.glance from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import cast_as_call import nova.tests.unit.image.fake CONF = nova.conf.CONF LOG = logging.getLogger(__name__) def generate_random_alphanumeric(length): """Creates a random alphanumeric string of specified length.""" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _x in range(length)) def generate_random_numeric(length): """Creates a random numeric string of specified length.""" return ''.join(random.choice(string.digits) for _x in range(length)) def generate_new_element(items, prefix, numeric=False): """Creates a random string with prefix, that is not in 'items' list.""" while True: if numeric: candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) if candidate not in items: return candidate LOG.debug("Random collision on %s" % candidate) class _IntegratedTestBase(test.TestCase): REQUIRES_LOCKING = True ADMIN_API = False def setUp(self): super(_IntegratedTestBase, self).setUp() f = self._get_flags() self.flags(**f) self.flags(verbose=True) nova.tests.unit.image.fake.stub_out_image_service(self) self._setup_services() self.api_fixture = self.useFixture( nova_fixtures.OSAPIFixture(self.api_major_version)) # if the class needs to run as admin, make the api endpoint # the admin, otherwise it's safer to run as non admin user. 
if self.ADMIN_API: self.api = self.api_fixture.admin_api else: self.api = self.api_fixture.api self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) def _setup_compute_service(self): return self.start_service('compute') def _setup_scheduler_service(self): self.flags(scheduler_driver='chance_scheduler') return self.start_service('scheduler') def _setup_services(self): self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) self.compute = self._setup_compute_service() self.consoleauth = self.start_service('consoleauth') self.network = self.start_service('network') self.scheduler = self._setup_scheduler_service() def _get_flags(self): """Allow subclass to modify global config before we start services.""" # NOTE(sdague): _get_flags is used by about 13 tests that # subclass this mostly to modify the extensions list. We # should instead make that declarative in the future, at which # point we can get rid of this. return {} def get_unused_server_name(self): servers = self.api.get_servers() server_names = [server['name'] for server in servers] return generate_new_element(server_names, 'server') def get_unused_flavor_name_id(self): flavors = self.api.get_flavors() flavor_names = list() flavor_ids = list() [(flavor_names.append(flavor['name']), flavor_ids.append(flavor['id'])) for flavor in flavors] return (generate_new_element(flavor_names, 'flavor'), int(generate_new_element(flavor_ids, '', True))) def get_invalid_image(self): return str(uuid.uuid4()) def _get_any_image_href(self): image = self.api.get_images()[0] LOG.debug("Image: %s" % image) if self._image_ref_parameter in image: image_href = image[self._image_ref_parameter] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href return image_href def _build_minimal_create_server_request(self): server = {} image_href = self._get_any_image_href() # We now have a valid imageId server[self._image_ref_parameter] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[0] LOG.debug("Using flavor: %s" % flavor) server[self._flavor_ref_parameter] = ('http://fake.server/%s' % flavor['id']) # Set a valid server name server_name = self.get_unused_server_name() server['name'] = server_name return server def _create_flavor_body(self, name, ram, vcpus, disk, ephemeral, id, swap, rxtx_factor, is_public): return { "flavor": { "name": name, "ram": ram, "vcpus": vcpus, "disk": disk, "OS-FLV-EXT-DATA:ephemeral": ephemeral, "id": id, "swap": swap, "rxtx_factor": rxtx_factor, "os-flavor-access:is_public": is_public, } } def _create_flavor(self, memory_mb=2048, vcpu=2, disk=10, ephemeral=10, swap=0, rxtx_factor=1.0, is_public=True, extra_spec=None): flv_name, flv_id = self.get_unused_flavor_name_id() body = self._create_flavor_body(flv_name, memory_mb, vcpu, disk, ephemeral, flv_id, swap, rxtx_factor, is_public) self.api_fixture.admin_api.post_flavor(body) if extra_spec is not None: spec = {"extra_specs": extra_spec} self.api_fixture.admin_api.post_extra_spec(flv_id, spec) return flv_id def _build_server(self, flavor_id): server = {} image_href = self._get_any_image_href() image = self.api.get_images()[0] LOG.debug("Image: %s" % image) if self._image_ref_parameter in image: image_href = image[self._image_ref_parameter] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server[self._image_ref_parameter] = image_href # Set a valid flavorId flavor = self.api.get_flavor(flavor_id) 
LOG.debug("Using flavor: %s" % flavor) server[self._flavor_ref_parameter] = ('http://fake.server/%s' % flavor['id']) # Set a valid server name server_name = self.get_unused_server_name() server['name'] = server_name return server def _check_api_endpoint(self, endpoint, expected_middleware): app = self.api_fixture.osapi.app.get((None, '/v2')) while getattr(app, 'application', False): for middleware in expected_middleware: if isinstance(app.application, middleware): expected_middleware.remove(middleware) break app = app.application self.assertEqual([], expected_middleware, ("The expected wsgi middlewares %s are not " "existed") % expected_middleware) nova-13.1.4/nova/tests/functional/test_login.py0000664000567000056710000000215013064447140022675 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from nova.tests.functional import integrated_helpers LOG = logging.getLogger(__name__) class LoginTest(integrated_helpers._IntegratedTestBase): api_major_version = 'v2' def test_login(self): # Simple check - we list flavors - so we know we're logged in. flavors = self.api.get_flavors() for flavor in flavors: LOG.debug("flavor: %s", flavor) class LoginTestV21(LoginTest): api_major_version = 'v2.1' nova-13.1.4/nova/tests/functional/test_legacy_v2_compatible_wrapper.py0000664000567000056710000000613713064447152027413 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.api import openstack from nova.api.openstack import compute from nova.api.openstack import wsgi from nova.tests.functional.api import client from nova.tests.functional import test_servers from nova.tests.unit import fake_network class LegacyV2CompatibleTestBase(test_servers.ServersTestBase): api_major_version = 'v2' def setUp(self): super(LegacyV2CompatibleTestBase, self).setUp() self._check_api_endpoint('/v2', [compute.APIRouterV21, openstack.LegacyV2CompatibleWrapper]) def test_request_with_microversion_headers(self): response = self.api.api_post('os-keypairs', {"keypair": {"name": "test"}}, headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'}) self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers) self.assertNotIn('Vary', response.headers) self.assertNotIn('type', response.body["keypair"]) def test_request_without_addtional_properties_check(self): response = self.api.api_post('os-keypairs', {"keypair": {"name": "test", "foooooo": "barrrrrr"}}, headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'}) self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers) self.assertNotIn('Vary', response.headers) self.assertNotIn('type', response.body["keypair"]) def test_request_with_pattern_properties_check(self): fake_network.set_stub_network_methods(self) server = self._build_minimal_create_server_request() post = {'server': server} created_server = self.api.post_server(post) self._wait_for_state_change(created_server, 'BUILD') response = self.api.post_server_metadata(created_server['id'], {'a': 'b'}) self.assertEqual(response, {'a': 'b'}) def test_request_with_pattern_properties_with_avoid_metadata(self): fake_network.set_stub_network_methods(self) server = self._build_minimal_create_server_request() post = {'server': server} created_server = self.api.post_server(post) exc = self.assertRaises(client.OpenStackApiException, self.api.post_server_metadata, created_server['id'], {'a': 'b', 'x' * 300: 'y', 'h' * 300: 'i'}) self.assertEqual(exc.response.status_code, 400) nova-13.1.4/nova/tests/functional/__init__.py0000664000567000056710000000150113064447140022264 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`functional` -- Nova functional tests ===================================================== .. automodule:: nova.tests.functional :platform: Unix """ import eventlet eventlet.monkey_patch(os=False) nova-13.1.4/nova/tests/functional/api_samples_test_base.py0000664000567000056710000004630113064447152025065 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from oslo_serialization import jsonutils import six from nova import test from nova.tests.functional import integrated_helpers PROJECT_ID = "6f70656e737461636b20342065766572" class NoMatch(test.TestingException): pass def pretty_data(data): data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True, indent=4) return '\n'.join(line.rstrip() for line in data.split('\n')).strip() def objectify(data): if not data: return {} # NOTE(sdague): templates will contain values like %(foo)s # throughout them. If these are inside of double quoted # strings, life is good, and we can treat it just like valid # json to load it to python. # # However we've got some fields which are ints, like # aggregate_id. This means we've got a snippet in the sample # that looks like: # # "id": %(aggregate_id)s, # # which is not valid json, and will explode. We do a quick and # dirty transform of this to: # # "id": "%(int:aggregate_id)s", # # That makes it valid data to convert to json, but keeps # around the information that we need to drop those strings # later. The regex anchors from the ': ', as all of these will # be top rooted keys. data = re.sub(r'(\: )%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data) return jsonutils.loads(data) class ApiSampleTestBase(integrated_helpers._IntegratedTestBase): all_extensions = False extension_name = None sample_dir = None microversion = None _use_common_server_api_samples = False def __init__(self, *args, **kwargs): super(ApiSampleTestBase, self).__init__(*args, **kwargs) self.subs = {} # TODO(auggy): subs should really be a class @property def subs(self): return self._subs @subs.setter def subs(self, value): non_strings = \ {k: v for k, v in value.items() if (not k == 'compute_host') and (not isinstance(v, six.string_types))} if len(non_strings) > 0: raise TypeError("subs can't contain non-string values:" "\n%(non_strings)s" % {'non_strings': non_strings}) else: self._subs = value @classmethod def _get_sample_path(cls, name, dirname, suffix='', api_version=None): parts = [dirname] parts.append('api_samples') # TODO(gmann): Once all tests gets merged for all extension # then we need to have a simple logic here to select sample file # directory which will be based on cls.sample_dir and api_version. # All other things will go away from here. Currently hacking this # till we merge every extensions tests. if cls.all_extensions and not cls.sample_dir: parts.append('all_extensions') # Note(gmann): if _use_common_server_api_samples is set to True # then common server sample files present in 'servers' directory # will be used. As of now it is being used for server POST request # to avoid duplicate copy of server req and resp sample files. # Example - ServersSampleBase's _post_server method. 
elif cls._use_common_server_api_samples: parts.append('servers') else: if cls.sample_dir: parts.append(cls.sample_dir) elif cls.extension_name: parts.append(cls.extension_name) if api_version: parts.append('v' + api_version) parts.append(name + ".json" + suffix) return os.path.join(*parts) @classmethod def _get_sample(cls, name, api_version=None): dirname = os.path.dirname(os.path.abspath(__file__)) dirname = os.path.normpath(os.path.join(dirname, "../../../doc")) return cls._get_sample_path(name, dirname, api_version=api_version) @classmethod def _get_template(cls, name, api_version=None): dirname = os.path.dirname(os.path.abspath(__file__)) dirname = os.path.normpath(os.path.join(dirname, "./api_sample_tests")) return cls._get_sample_path(name, dirname, suffix='.tpl', api_version=api_version) def _read_template(self, name): template = self._get_template(name, self.microversion) with open(template) as inf: return inf.read().strip() def _write_template(self, name, data): with open(self._get_template(name, self.microversion), 'w') as outf: outf.write(data) def _write_sample(self, name, data): with open(self._get_sample( name, self.microversion), 'w') as outf: outf.write(data) def _compare_result(self, expected, result, result_str): matched_value = None # None if expected is None: if result is None: pass elif result == u'': pass # TODO(auggy): known issue Bug#1544720 else: raise NoMatch('%(result_str)s: Expected None, got %(result)s.' % {'result_str': result_str, 'result': result}) # dictionary elif isinstance(expected, dict): if not isinstance(result, dict): raise NoMatch('%(result_str)s: %(result)s is not a dict.' % {'result_str': result_str, 'result': result}) ex_keys = sorted(expected.keys()) res_keys = sorted(result.keys()) if ex_keys != res_keys: ex_delta = [] res_delta = [] for key in ex_keys: if key not in res_keys: ex_delta.append(key) for key in res_keys: if key not in ex_keys: res_delta.append(key) raise NoMatch( 'Dictionary key mismatch:\n' 'Extra key(s) in template:\n%(ex_delta)s\n' 'Extra key(s) in %(result_str)s:\n%(res_delta)s\n' % {'ex_delta': ex_delta, 'result_str': result_str, 'res_delta': res_delta}) for key in ex_keys: # TODO(auggy): pass key name along as well for error reporting res = self._compare_result(expected[key], result[key], result_str) matched_value = res or matched_value # list elif isinstance(expected, list): if not isinstance(result, list): raise NoMatch( '%(result_str)s: %(result)s is not a list.' % {'result_str': result_str, 'result': result}) expected = expected[:] extra = [] for res_obj in result: for i, ex_obj in enumerate(expected): try: matched_value = self._compare_result(ex_obj, res_obj, result_str) del expected[i] break except NoMatch: pass else: extra.append(res_obj) error = [] if expected: error.append('Extra list items in template:') error.extend([repr(o) for o in expected]) if extra: error.append('Extra list items in %(result_str)s:' % {'result_str': result_str}) error.extend([repr(o) for o in extra]) if error: raise NoMatch('\n'.join(error)) # template string elif isinstance(expected, six.string_types) and '%' in expected: # NOTE(vish): escape stuff for regex for char in '[]<>?': expected = expected.replace(char, '\\%s' % char) # NOTE(vish): special handling of subs that are not quoted. We are # expecting an int but we had to pass in a string # so the json would parse properly. 
if expected.startswith("%(int:"): result = str(result) expected = expected.replace('int:', '') expected = expected % self.subs expected = '^%s$' % expected match = re.match(expected, result) if not match: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: %(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) try: matched_value = match.group('id') except IndexError: if match.groups(): matched_value = match.groups()[0] # string elif isinstance(expected, six.string_types): # NOTE(danms): Ignore whitespace in this comparison expected = expected.strip() if isinstance(result, six.string_types): result = result.strip() if expected != result: # NOTE(tdurakov):this attempt to parse string as JSON # is needed for correct comparison of hypervisor.cpu_info, # which is stringified JSON object # # TODO(tdurakov): remove this check as soon as # hypervisor.cpu_info become common JSON object in REST API. try: expected = objectify(expected) result = objectify(result) return self._compare_result(expected, result, result_str) except ValueError: pass raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: ' '%(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) # int elif isinstance(expected, (six.integer_types, float)): if expected != result: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: ' '%(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) else: raise ValueError( 'Unexpected type %(expected_type)s' % {'expected_type': type(expected)}) return matched_value def generalize_subs(self, subs, vanilla_regexes): """Give the test a chance to modify subs after the server response was verified, and before the on-disk doc/api_samples file is checked. This may be needed by some tests to convert exact matches expected from the server into pattern matches to verify what is in the sample file. If there are no changes to be made, subs is returned unharmed. """ return subs def _update_links(self, sample_data): """Process sample data and update version specific links.""" # replace version urls url_re = self._get_host() + "/v(2|2\.1)/" + PROJECT_ID new_url = self._get_host() + "/" + self.api_major_version if self._project_id: new_url += "/" + PROJECT_ID updated_data = re.sub(url_re, new_url, sample_data) # replace unversioned urls url_re = self._get_host() + "/" + PROJECT_ID new_url = self._get_host() if self._project_id: new_url += "/" + PROJECT_ID updated_data = re.sub(url_re, new_url, updated_data) return updated_data def _verify_response(self, name, subs, response, exp_code, update_links=True): # Always also include the laundry list of base regular # expressions for possible key values in our templates. Test # specific patterns (the value of ``subs``) can override # these. 
regexes = self._get_regexes() regexes.update(subs) subs = regexes self.subs = subs self.assertEqual(exp_code, response.status_code) response_data = response.content response_data = pretty_data(response_data) if not os.path.exists(self._get_template(name, self.microversion)): self._write_template(name, response_data) template_data = response_data else: template_data = self._read_template(name) if (self.generate_samples and not os.path.exists(self._get_sample( name, self.microversion))): self._write_sample(name, response_data) sample_data = response_data else: with open(self._get_sample(name, self.microversion)) as sample: sample_data = sample.read() if update_links: sample_data = self._update_links(sample_data) try: template_data = objectify(template_data) response_data = objectify(response_data) response_result = self._compare_result(template_data, response_data, "Response") # NOTE(danms): replace some of the subs with patterns for the # doc/api_samples check, which won't have things like the # correct compute host name. Also let the test do some of its # own generalization, if necessary vanilla_regexes = self._get_regexes() subs['compute_host'] = vanilla_regexes['host_name'] subs['id'] = vanilla_regexes['id'] subs['uuid'] = vanilla_regexes['uuid'] subs['image_id'] = vanilla_regexes['uuid'] subs = self.generalize_subs(subs, vanilla_regexes) self.subs = subs sample_data = objectify(sample_data) self._compare_result(template_data, sample_data, "Sample") return response_result except NoMatch: raise def _get_host(self): return 'http://openstack.example.com' def _get_glance_host(self): return 'http://glance.openstack.example.com' def _get_regexes(self): text = r'(\\"|[^"])*' isotime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z' strtime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}' xmltime_re = ('\d{4}-[0,1]\d-[0-3]\d ' '\d{2}:\d{2}:\d{2}' '(\.\d{6})?(\+00:00)?') # NOTE(claudiub): the x509 keypairs are different from the # ssh keypairs. For example, the x509 fingerprint has 40 bytes. return { 'isotime': isotime_re, 'strtime': strtime_re, 'strtime_or_none': r'None|%s' % strtime_re, 'xmltime': xmltime_re, 'password': '[0-9a-zA-Z]{1,12}', 'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}', 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}', 'id': '(?P[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12})', 'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12}', 'reservation_id': 'r-[0-9a-zA-Z]{8}', 'private_key': '(-----BEGIN RSA PRIVATE KEY-----|)' '[a-zA-Z0-9\n/+=]*' '(-----END RSA PRIVATE KEY-----|)', 'public_key': '(ssh-rsa|-----BEGIN CERTIFICATE-----)' '[ a-zA-Z0-9\n/+=]*' '(Generated-by-Nova|-----END CERTIFICATE-----)', 'fingerprint': '(([0-9a-f]{2}:){19}|([0-9a-f]{2}:){15})' '[0-9a-f]{2}', 'keypair_type': 'ssh|x509', 'host': self._get_host(), 'host_name': r'\w+', 'glance_host': self._get_glance_host(), 'compute_host': self.compute.host, 'text': text, 'int': '[0-9]+', 'user_id': text, 'api_vers': self.api_major_version, 'compute_endpoint': self._get_compute_endpoint(), 'versioned_compute_endpoint': self._get_vers_compute_endpoint(), } def _get_compute_endpoint(self): # NOTE(sdague): "openstack" is stand in for project_id, it # should be more generic in future. if self._project_id: return '%s/%s' % (self._get_host(), PROJECT_ID) else: return self._get_host() def _get_vers_compute_endpoint(self): # NOTE(sdague): "openstack" is stand in for project_id, it # should be more generic in future. 
if self._project_id: return '%s/%s/%s' % (self._get_host(), self.api_major_version, PROJECT_ID) else: return '%s/%s' % (self._get_host(), self.api_major_version) def _get_response(self, url, method, body=None, strip_version=False, headers=None): headers = headers or {} headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' return self.api.api_request(url, body=body, method=method, headers=headers, strip_version=strip_version) def _do_options(self, url, strip_version=False, headers=None): return self._get_response(url, 'OPTIONS', strip_version=strip_version, headers=headers) def _do_get(self, url, strip_version=False, headers=None): return self._get_response(url, 'GET', strip_version=strip_version, headers=headers) def _do_post(self, url, name, subs, method='POST', headers=None): self.subs = subs body = self._read_template(name) % self.subs sample = self._get_sample(name, self.microversion) if self.generate_samples and not os.path.exists(sample): self._write_sample(name, body) return self._get_response(url, method, body, headers=headers) def _do_put(self, url, name, subs, headers=None): return self._do_post(url, name, subs, method='PUT', headers=headers) def _do_delete(self, url, headers=None): return self._get_response(url, 'DELETE', headers=headers) nova-13.1.4/nova/tests/functional/test_servers.py0000664000567000056710000007171613064447152023277 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
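# ---------------------------------------------------------------------------
# Illustrative sketch only -- not an existing test.  It shows the typical
# request/verify flow of an API sample test built on ApiSampleTestBase above.
# The 'os-example' URL, the 'example-get-resp' template name and the
# substitution value are made-up; real sample tests live under
# nova/tests/functional/api_sample_tests with their own template directories.
def _example_api_sample_flow(test):
    """'test' is assumed to be a set-up ApiSampleTestBase instance."""
    # Issue a GET through the in-process API started by OSAPIFixture.
    response = test._do_get('os-example')
    # subs fill the %(...)s placeholders in the .tpl template; anything not
    # given here falls back to the shared patterns from _get_regexes().
    subs = {'host_name': 'compute.example.com'}
    # Compare the live response against the template and against the
    # doc/api_samples file (written out first when sample generation is on).
    test._verify_response('example-get-resp', subs, response, 200)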
import datetime import time import zlib from oslo_log import log as logging from oslo_utils import timeutils from nova import context from nova import exception from nova.tests.functional.api import client from nova.tests.functional import integrated_helpers from nova.tests.unit import fake_network LOG = logging.getLogger(__name__) class ServersTestBase(integrated_helpers._IntegratedTestBase): api_major_version = 'v2' _force_delete_parameter = 'forceDelete' _image_ref_parameter = 'imageRef' _flavor_ref_parameter = 'flavorRef' _access_ipv4_parameter = 'accessIPv4' _access_ipv6_parameter = 'accessIPv6' _return_resv_id_parameter = 'return_reservation_id' _min_count_parameter = 'min_count' def setUp(self): super(ServersTestBase, self).setUp() self.conductor = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') def _wait_for_state_change(self, server, from_status): for i in range(0, 50): server = self.api.get_server(server['id']) if server['status'] != from_status: break time.sleep(.1) return server def _wait_for_deletion(self, server_id): # Wait (briefly) for deletion for _retries in range(50): try: found_server = self.api.get_server(server_id) except client.OpenStackApiNotFoundException: found_server = None LOG.debug("Got 404, proceeding") break LOG.debug("Found_server=%s" % found_server) # TODO(justinsb): Mock doesn't yet do accurate state changes # if found_server['status'] != 'deleting': # break time.sleep(.1) # Should be gone self.assertFalse(found_server) def _delete_server(self, server_id): # Delete the server self.api.delete_server(server_id) self._wait_for_deletion(server_id) def _get_access_ips_params(self): return {self._access_ipv4_parameter: "172.19.0.2", self._access_ipv6_parameter: "fe80::2"} def _verify_access_ips(self, server): self.assertEqual('172.19.0.2', server[self._access_ipv4_parameter]) self.assertEqual('fe80::2', server[self._access_ipv6_parameter]) class ServersTest(ServersTestBase): def test_get_servers(self): # Simple check that listing servers works. servers = self.api.get_servers() for server in servers: LOG.debug("server: %s" % server) def test_create_server_with_error(self): # Create a server which will enter error state. fake_network.set_stub_network_methods(self) def throw_error(*args, **kwargs): raise exception.BuildAbortException(reason='', instance_uuid='fake') self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error) server = self._build_minimal_create_server_request() created_server = self.api.post_server({"server": server}) created_server_id = created_server['id'] found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) found_server = self._wait_for_state_change(found_server, 'BUILD') self.assertEqual('ERROR', found_server['status']) self._delete_server(created_server_id) def test_create_and_delete_server(self): # Creates and deletes a server. fake_network.set_stub_network_methods(self) # Create server # Build the server data gradually, checking errors along the way server = {} good_server = self._build_minimal_create_server_request() post = {'server': server} # Without an imageRef, this throws 500. # TODO(justinsb): Check whatever the spec says should be thrown here self.assertRaises(client.OpenStackApiException, self.api.post_server, post) # With an invalid imageRef, this throws 500. 
server[self._image_ref_parameter] = self.get_invalid_image() # TODO(justinsb): Check whatever the spec says should be thrown here self.assertRaises(client.OpenStackApiException, self.api.post_server, post) # Add a valid imageRef server[self._image_ref_parameter] = good_server.get( self._image_ref_parameter) # Without flavorRef, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here self.assertRaises(client.OpenStackApiException, self.api.post_server, post) server[self._flavor_ref_parameter] = good_server.get( self._flavor_ref_parameter) # Without a name, this throws 500 # TODO(justinsb): Check whatever the spec says should be thrown here self.assertRaises(client.OpenStackApiException, self.api.post_server, post) # Set a valid server name server['name'] = good_server['name'] created_server = self.api.post_server(post) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Check it's there found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) # It should also be in the all-servers list servers = self.api.get_servers() server_ids = [s['id'] for s in servers] self.assertIn(created_server_id, server_ids) found_server = self._wait_for_state_change(found_server, 'BUILD') # It should be available... # TODO(justinsb): Mock doesn't yet do this... self.assertEqual('ACTIVE', found_server['status']) servers = self.api.get_servers(detail=True) for server in servers: self.assertIn("image", server) self.assertIn("flavor", server) self._delete_server(created_server_id) def _force_reclaim(self): # Make sure that compute manager thinks the instance is # old enough to be expired the_past = timeutils.utcnow() + datetime.timedelta(hours=1) timeutils.set_time_override(override_time=the_past) self.addCleanup(timeutils.clear_time_override) ctxt = context.get_admin_context() self.compute._reclaim_queued_deletes(ctxt) def test_deferred_delete(self): # Creates, deletes and waits for server to be reclaimed. self.flags(reclaim_instance_interval=1) fake_network.set_stub_network_methods(self) # Create server server = self._build_minimal_create_server_request() created_server = self.api.post_server({'server': server}) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Wait for it to finish being created found_server = self._wait_for_state_change(created_server, 'BUILD') # It should be available... self.assertEqual('ACTIVE', found_server['status']) # Cannot restore unless instance is deleted self.assertRaises(client.OpenStackApiException, self.api.post_server_action, created_server_id, {'restore': {}}) # Delete the server self.api.delete_server(created_server_id) # Wait for queued deletion found_server = self._wait_for_state_change(found_server, 'ACTIVE') self.assertEqual('SOFT_DELETED', found_server['status']) self._force_reclaim() # Wait for real deletion self._wait_for_deletion(created_server_id) def test_deferred_delete_restore(self): # Creates, deletes and restores a server. 
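# Use a long (one hour) reclaim_instance_interval so deferred delete is enabled but the periodic reclaim task does not remove the soft-deleted server before it is restored below.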
self.flags(reclaim_instance_interval=3600) fake_network.set_stub_network_methods(self) # Create server server = self._build_minimal_create_server_request() created_server = self.api.post_server({'server': server}) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Wait for it to finish being created found_server = self._wait_for_state_change(created_server, 'BUILD') # It should be available... self.assertEqual('ACTIVE', found_server['status']) # Delete the server self.api.delete_server(created_server_id) # Wait for queued deletion found_server = self._wait_for_state_change(found_server, 'ACTIVE') self.assertEqual('SOFT_DELETED', found_server['status']) # Restore server self.api.post_server_action(created_server_id, {'restore': {}}) # Wait for server to become active again found_server = self._wait_for_state_change(found_server, 'DELETED') self.assertEqual('ACTIVE', found_server['status']) def test_deferred_delete_force(self): # Creates, deletes and force deletes a server. self.flags(reclaim_instance_interval=3600) fake_network.set_stub_network_methods(self) # Create server server = self._build_minimal_create_server_request() created_server = self.api.post_server({'server': server}) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Wait for it to finish being created found_server = self._wait_for_state_change(created_server, 'BUILD') # It should be available... self.assertEqual('ACTIVE', found_server['status']) # Delete the server self.api.delete_server(created_server_id) # Wait for queued deletion found_server = self._wait_for_state_change(found_server, 'ACTIVE') self.assertEqual('SOFT_DELETED', found_server['status']) # Force delete server self.api.post_server_action(created_server_id, {self._force_delete_parameter: {}}) # Wait for real deletion self._wait_for_deletion(created_server_id) def test_create_server_with_metadata(self): # Creates a server with metadata. fake_network.set_stub_network_methods(self) # Build the server data gradually, checking errors along the way server = self._build_minimal_create_server_request() metadata = {} for i in range(30): metadata['key_%s' % i] = 'value_%s' % i server['metadata'] = metadata post = {'server': server} created_server = self.api.post_server(post) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) self.assertEqual(metadata, found_server.get('metadata')) # The server should also be in the all-servers details list servers = self.api.get_servers(detail=True) server_map = {server['id']: server for server in servers} found_server = server_map.get(created_server_id) self.assertTrue(found_server) # Details do include metadata self.assertEqual(metadata, found_server.get('metadata')) # The server should also be in the all-servers summary list servers = self.api.get_servers(detail=False) server_map = {server['id']: server for server in servers} found_server = server_map.get(created_server_id) self.assertTrue(found_server) # Summary should not include metadata self.assertFalse(found_server.get('metadata')) # Cleanup self._delete_server(created_server_id) def test_create_and_rebuild_server(self): # Rebuild a server with metadata. 
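# The rebuild action below replaces the image, name, access IPs and metadata in a single call; each of those fields is then verified from a fresh GET of the server.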
fake_network.set_stub_network_methods(self) # create a server request which initially has no metadata server = self._build_minimal_create_server_request() server_post = {'server': server} metadata = {} for i in range(30): metadata['key_%s' % i] = 'value_%s' % i server_post['server']['metadata'] = metadata created_server = self.api.post_server(server_post) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] created_server = self._wait_for_state_change(created_server, 'BUILD') # rebuild the server with metadata and other server attributes post = {} post['rebuild'] = { self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "name": "blah", self._access_ipv4_parameter: "172.19.0.2", self._access_ipv6_parameter: "fe80::2", "metadata": {'some': 'thing'}, } post['rebuild'].update(self._get_access_ips_params()) self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) self.assertTrue(created_server['id']) found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) self.assertEqual({'some': 'thing'}, found_server.get('metadata')) self.assertEqual('blah', found_server.get('name')) self.assertEqual(post['rebuild'][self._image_ref_parameter], found_server.get('image')['id']) self._verify_access_ips(found_server) # rebuild the server with empty metadata and nothing else post = {} post['rebuild'] = { self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "metadata": {}, } self.api.post_server_action(created_server_id, post) LOG.debug("rebuilt server: %s" % created_server) self.assertTrue(created_server['id']) found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) self.assertEqual({}, found_server.get('metadata')) self.assertEqual('blah', found_server.get('name')) self.assertEqual(post['rebuild'][self._image_ref_parameter], found_server.get('image')['id']) self._verify_access_ips(found_server) # Cleanup self._delete_server(created_server_id) def test_rename_server(self): # Test building and renaming a server. fake_network.set_stub_network_methods(self) # Create a server server = self._build_minimal_create_server_request() created_server = self.api.post_server({'server': server}) LOG.debug("created_server: %s" % created_server) server_id = created_server['id'] self.assertTrue(server_id) # Rename the server to 'new-name' self.api.put_server(server_id, {'server': {'name': 'new-name'}}) # Check the name of the server created_server = self.api.get_server(server_id) self.assertEqual(created_server['name'], 'new-name') # Cleanup self._delete_server(server_id) def test_create_multiple_servers(self): # Creates multiple servers and checks for reservation_id.
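# When return_reservation_id is set on a multi-create request the response carries only a reservation_id (for example something like {'reservation_id': 'r-abc123'}); the created servers are then looked up using that id as a search option.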
# Create 2 servers, setting 'return_reservation_id', which should # return a reservation_id server = self._build_minimal_create_server_request() server[self._min_count_parameter] = 2 server[self._return_resv_id_parameter] = True post = {'server': server} response = self.api.post_server(post) self.assertIn('reservation_id', response) reservation_id = response['reservation_id'] self.assertNotIn(reservation_id, ['', None]) # Create 1 more server, which should not return a reservation_id server = self._build_minimal_create_server_request() post = {'server': server} created_server = self.api.post_server(post) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # look up servers created by the first request. servers = self.api.get_servers(detail=True, search_opts={'reservation_id': reservation_id}) server_map = {server['id']: server for server in servers} found_server = server_map.get(created_server_id) # The server from the 2nd request should not be there. self.assertIsNone(found_server) # Should have found 2 servers. self.assertEqual(len(server_map), 2) # Cleanup self._delete_server(created_server_id) for server_id in server_map: self._delete_server(server_id) def test_create_server_with_injected_files(self): # Creates a server with injected_files. fake_network.set_stub_network_methods(self) personality = [] # Inject a text file data = 'Hello, World!' personality.append({ 'path': '/helloworld.txt', 'contents': data.encode('base64'), }) # Inject a binary file data = zlib.compress('Hello, World!') personality.append({ 'path': '/helloworld.zip', 'contents': data.encode('base64'), }) # Create server server = self._build_minimal_create_server_request() server['personality'] = personality post = {'server': server} created_server = self.api.post_server(post) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Check it's there found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) found_server = self._wait_for_state_change(found_server, 'BUILD') self.assertEqual('ACTIVE', found_server['status']) # Cleanup self._delete_server(created_server_id) class ServersTestV21(ServersTest): api_major_version = 'v2.1' class ServersTestV219(ServersTestBase): api_major_version = 'v2.1' def _create_server(self, set_desc = True, desc = None): server = self._build_minimal_create_server_request() if set_desc: server['description'] = desc post = {'server': server} response = self.api.api_post('/servers', post, headers=self._headers).body return (server, response['server']) def _update_server(self, server_id, set_desc = True, desc = None): new_name = integrated_helpers.generate_random_alphanumeric(8) server = {'server': {'name': new_name}} if set_desc: server['server']['description'] = desc self.api.api_put('/servers/%s' % server_id, server, headers=self._headers) def _rebuild_server(self, server_id, set_desc = True, desc = None): new_name = integrated_helpers.generate_random_alphanumeric(8) post = {} post['rebuild'] = { "name": new_name, self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", self._access_ipv4_parameter: "172.19.0.2", self._access_ipv6_parameter: "fe80::2", "metadata": {'some': 'thing'}, } post['rebuild'].update(self._get_access_ips_params()) if set_desc: post['rebuild']['description'] = desc self.api.api_post('/servers/%s/action' % server_id, post, headers=self._headers) def _create_server_and_verify(self, set_desc = True, expected_desc =
None): # Creates a server with a description and verifies it is # in the GET responses. created_server_id = self._create_server(set_desc, expected_desc)[1]['id'] self._verify_server_description(created_server_id, expected_desc) self._delete_server(created_server_id) def _update_server_and_verify(self, server_id, set_desc = True, expected_desc = None): # Updates a server with a description and verifies it is # in the GET responses. self._update_server(server_id, set_desc, expected_desc) self._verify_server_description(server_id, expected_desc) def _rebuild_server_and_verify(self, server_id, set_desc = True, expected_desc = None): # Rebuilds a server with a description and verifies it is # in the GET responses. self._rebuild_server(server_id, set_desc, expected_desc) self._verify_server_description(server_id, expected_desc) def _verify_server_description(self, server_id, expected_desc = None, desc_in_resp = True): # Calls GET on the servers and verifies that the description # is set as expected in the response, or not set at all. response = self.api.api_get('/servers/%s' % server_id, headers=self._headers) found_server = response.body['server'] self.assertEqual(server_id, found_server['id']) if desc_in_resp: # Verify the description is set as expected (can be None) self.assertEqual(expected_desc, found_server.get('description')) else: # Verify the description is not included in the response. self.assertNotIn('description', found_server) servers = self.api.api_get('/servers/detail', headers=self._headers).body['servers'] server_map = {server['id']: server for server in servers} found_server = server_map.get(server_id) self.assertTrue(found_server) if desc_in_resp: # Verify the description is set as expected (can be None) self.assertEqual(expected_desc, found_server.get('description')) else: # Verify the description is not included in the response. 
self.assertNotIn('description', found_server) def _create_assertRaisesRegex(self, desc): # Verifies that a 400 error is thrown on create server with self.assertRaisesRegex(client.OpenStackApiException, ".*Unexpected status code.*") as cm: self._create_server(True, desc) self.assertEqual(400, cm.exception.response.status_code) def _update_assertRaisesRegex(self, server_id, desc): # Verifies that a 400 error is thrown on update server with self.assertRaisesRegex(client.OpenStackApiException, ".*Unexpected status code.*") as cm: self._update_server(server_id, True, desc) self.assertEqual(400, cm.exception.response.status_code) def _rebuild_assertRaisesRegex(self, server_id, desc): # Verifies that a 400 error is thrown on rebuild server with self.assertRaisesRegex(client.OpenStackApiException, ".*Unexpected status code.*") as cm: self._rebuild_server(server_id, True, desc) self.assertEqual(400, cm.exception.response.status_code) def test_create_server_with_description(self): fake_network.set_stub_network_methods(self) self._headers = {} self._headers['X-OpenStack-Nova-API-Version'] = '2.19' # Create and get a server with a description self._create_server_and_verify(True, 'test description') # Create and get a server with an empty description self._create_server_and_verify(True, '') # Create and get a server with description set to None self._create_server_and_verify() # Create and get a server without setting the description self._create_server_and_verify(False) def test_update_server_with_description(self): fake_network.set_stub_network_methods(self) self._headers = {} self._headers['X-OpenStack-Nova-API-Version'] = '2.19' # Create a server with an initial description server_id = self._create_server(True, 'test desc 1')[1]['id'] # Update and get the server with a description self._update_server_and_verify(server_id, True, 'updated desc') # Update and get the server name without changing the description self._update_server_and_verify(server_id, False, 'updated desc') # Update and get the server with an empty description self._update_server_and_verify(server_id, True, '') # Update and get the server by removing the description (set to None) self._update_server_and_verify(server_id) # Update and get the server with a 2nd new description self._update_server_and_verify(server_id, True, 'updated desc2') # Cleanup self._delete_server(server_id) def test_rebuild_server_with_description(self): fake_network.set_stub_network_methods(self) self._headers = {} self._headers['X-OpenStack-Nova-API-Version'] = '2.19' # Create a server with an initial description server = self._create_server(True, 'test desc 1')[1] server_id = server['id'] self._wait_for_state_change(server, 'BUILD') # Rebuild and get the server with a description self._rebuild_server_and_verify(server_id, True, 'updated desc') # Rebuild and get the server name without changing the description self._rebuild_server_and_verify(server_id, False, 'updated desc') # Rebuild and get the server with an empty description self._rebuild_server_and_verify(server_id, True, '') # Rebuild and get the server by removing the description (set to None) self._rebuild_server_and_verify(server_id) # Rebuild and get the server with a 2nd new description self._rebuild_server_and_verify(server_id, True, 'updated desc2') # Cleanup self._delete_server(server_id) def test_version_compatibility(self): fake_network.set_stub_network_methods(self) # Create a server with microversion v2.19 and a description. 
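# The X-OpenStack-Nova-API-Version header selects the microversion per request, so the same client can exercise both the 2.18 and 2.19 behaviour within a single test.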
self._headers = {} self._headers['X-OpenStack-Nova-API-Version'] = '2.19' server_id = self._create_server(True, 'test desc 1')[1]['id'] # Verify that the description is not included on V2.18 GETs self._headers['X-OpenStack-Nova-API-Version'] = '2.18' self._verify_server_description(server_id, desc_in_resp = False) # Verify that updating the server with description on V2.18 # results in a 400 error self._update_assertRaisesRegex(server_id, 'test update 2.18') # Verify that rebuilding the server with description on V2.18 # results in a 400 error self._rebuild_assertRaisesRegex(server_id, 'test rebuild 2.18') # Cleanup self._delete_server(server_id) # Create a server on V2.18 and verify that the description # defaults to the name on a V2.19 GET self._headers['X-OpenStack-Nova-API-Version'] = '2.18' server_req, response = self._create_server(False) server_id = response['id'] self._headers['X-OpenStack-Nova-API-Version'] = '2.19' self._verify_server_description(server_id, server_req['name']) # Cleanup self._delete_server(server_id) # Verify that creating a server with description on V2.18 # results in a 400 error self._headers['X-OpenStack-Nova-API-Version'] = '2.18' self._create_assertRaisesRegex('test create 2.18') def test_description_errors(self): fake_network.set_stub_network_methods(self) self._headers = {} self._headers['X-OpenStack-Nova-API-Version'] = '2.19' # Create servers with invalid descriptions. These throw 400. # Invalid unicode with non-printable control char self._create_assertRaisesRegex(u'invalid\0dstring') # Description is longer than 255 chars self._create_assertRaisesRegex('x' * 256) # Update and rebuild servers with invalid descriptions. # These throw 400. server_id = self._create_server(True, "desc")[1]['id'] # Invalid unicode with non-printable control char self._update_assertRaisesRegex(server_id, u'invalid\u0604string') self._rebuild_assertRaisesRegex(server_id, u'invalid\u0604string') # Description is longer than 255 chars self._update_assertRaisesRegex(server_id, 'x' * 256) self._rebuild_assertRaisesRegex(server_id, 'x' * 256) nova-13.1.4/nova/tests/functional/test_server_group.py0000664000567000056710000010506613064447152024324 0ustar jenkinsjenkins00000000000000# Copyright 2015 Ericsson AB # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
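# NOTE: the server group tests below start two (and in some cases three) in-process compute services so that affinity and anti-affinity placement decisions can be observed across different hosts.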
import mock import time from oslo_config import cfg from nova import context from nova import db from nova.db.sqlalchemy import api as db_api from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client from nova.tests.functional import api_paste_fixture from nova.tests.unit import fake_network from nova.tests.unit import policy_fixture import nova.scheduler.utils import nova.servicegroup import nova.tests.unit.image.fake CONF = cfg.CONF # An alternate project id PROJECT_ID_ALT = "616c6c796f7572626173656172656f73" class ServerGroupTestBase(test.TestCase): REQUIRES_LOCKING = True api_major_version = 'v2' microversion = None _image_ref_parameter = 'imageRef' _flavor_ref_parameter = 'flavorRef' # Note(gibi): RamFilter is needed to ensure that # test_boot_servers_with_affinity_no_valid_host behaves as expected _scheduler_default_filters = ('ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter', 'RamFilter') # Override servicegroup parameters to make the tests run faster _service_down_time = 2 _report_interval = 1 anti_affinity = {'name': 'fake-name-1', 'policies': ['anti-affinity']} affinity = {'name': 'fake-name-2', 'policies': ['affinity']} def _get_weight_classes(self): return [] def setUp(self): super(ServerGroupTestBase, self).setUp() self.flags(scheduler_default_filters=self._scheduler_default_filters) self.flags(scheduler_weight_classes=self._get_weight_classes()) self.flags(service_down_time=self._service_down_time) self.flags(report_interval=self._report_interval) self.useFixture(policy_fixture.RealPolicyFixture()) if self.api_major_version == 'v2.1': api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) else: self.useFixture(api_paste_fixture.ApiPasteLegacyV2Fixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2')) self.api = api_fixture.api self.api.microversion = self.microversion self.admin_api = api_fixture.admin_api self.admin_api.microversion = self.microversion # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) self.start_service('conductor', manager=CONF.conductor.manager) self.start_service('scheduler') self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) def _wait_for_state_change(self, server, expected_status, max_retries=10): retry_count = 0 while True: server = self.admin_api.get_server(server['id']) if server['status'] == expected_status: break retry_count += 1 if retry_count == max_retries: self.fail('Wait for state change failed, ' 'expected_status=%s, actual_status=%s' % (expected_status, server['status'])) time.sleep(0.5) return server def _boot_a_server_to_group(self, group, expected_status='ACTIVE', flavor=None): server = self._build_minimal_create_server_request('some-server') if flavor: server[self._flavor_ref_parameter] = ('http://fake.server/%s' % flavor['id']) post = {'server': server, 'os:scheduler_hints': {'group': group['id']}} created_server = self.api.post_server(post) self.assertTrue(created_server['id']) # Wait for it to finish being created found_server = self._wait_for_state_change(created_server, expected_status) return found_server def _build_minimal_create_server_request(self, name): server = {} image = self.api.get_images()[0] if self._image_ref_parameter in image: image_href = image[self._image_ref_parameter] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server[self._image_ref_parameter] = 
image_href # Set a valid flavorId flavor = self.api.get_flavors()[1] server[self._flavor_ref_parameter] = ('http://fake.server/%s' % flavor['id']) server['name'] = name return server class ServerGroupTestV2(ServerGroupTestBase): api_major_version = 'v2' def setUp(self): super(ServerGroupTestV2, self).setUp() self.start_service('network') self.compute = self.start_service('compute') # NOTE(gibi): start a second compute host to be able to test affinity self.compute2 = self.start_service('compute', host='host2') fake_network.set_stub_network_methods(self) def test_get_no_groups(self): groups = self.api.get_server_groups() self.assertEqual([], groups) def test_create_and_delete_groups(self): groups = [self.anti_affinity, self.affinity] created_groups = [] for group in groups: created_group = self.api.post_server_groups(group) created_groups.append(created_group) self.assertEqual(group['name'], created_group['name']) self.assertEqual(group['policies'], created_group['policies']) self.assertEqual([], created_group['members']) self.assertEqual({}, created_group['metadata']) self.assertIn('id', created_group) group_details = self.api.get_server_group(created_group['id']) self.assertEqual(created_group, group_details) existing_groups = self.api.get_server_groups() self.assertIn(created_group, existing_groups) existing_groups = self.api.get_server_groups() self.assertEqual(len(groups), len(existing_groups)) for group in created_groups: self.api.delete_server_group(group['id']) existing_groups = self.api.get_server_groups() self.assertNotIn(group, existing_groups) def test_create_wrong_policy(self): ex = self.assertRaises(client.OpenStackApiException, self.api.post_server_groups, {'name': 'fake-name-1', 'policies': ['wrong-policy']}) self.assertEqual(400, ex.response.status_code) self.assertIn('Invalid input', ex.response.text) self.assertIn('wrong-policy', ex.response.text) def test_get_groups_all_projects(self): # This test requires APIs using two projects. # Create an API using project 'openstack1'. # This is a non-admin API. # # NOTE(sdague): this is actually very much *not* how this # fixture should be used. This actually spawns a whole # additional API server. Should be addressed in the future. api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture( api_version=self.api_major_version, project_id=PROJECT_ID_ALT)).api api_openstack1.microversion = self.microversion # Create a server group in project 'openstack' # Project 'openstack' is used by self.api group1 = self.anti_affinity openstack_group = self.api.post_server_groups(group1) # Create a server group in project 'openstack1' group2 = self.affinity openstack1_group = api_openstack1.post_server_groups(group2) # The admin should be able to get server groups in all projects. all_projects_admin = self.admin_api.get_server_groups( all_projects=True) self.assertIn(openstack_group, all_projects_admin) self.assertIn(openstack1_group, all_projects_admin) # The non-admin should only be able to get server groups # in his project. # The all_projects parameter is ignored for non-admin clients. 
all_projects_non_admin = api_openstack1.get_server_groups( all_projects=True) self.assertNotIn(openstack_group, all_projects_non_admin) self.assertIn(openstack1_group, all_projects_non_admin) def test_create_duplicated_policy(self): ex = self.assertRaises(client.OpenStackApiException, self.api.post_server_groups, {"name": "fake-name-1", "policies": ["affinity", "affinity"]}) self.assertEqual(400, ex.response.status_code) self.assertIn('Invalid input', ex.response.text) def test_create_multiple_policies(self): ex = self.assertRaises(client.OpenStackApiException, self.api.post_server_groups, {"name": "fake-name-1", "policies": ["anti-affinity", "affinity"]}) self.assertEqual(400, ex.response.status_code) def _boot_servers_to_group(self, group, flavor=None): servers = [] for _ in range(0, 2): server = self._boot_a_server_to_group(group, flavor=flavor) servers.append(server) return servers def test_boot_servers_with_affinity(self): created_group = self.api.post_server_groups(self.affinity) servers = self._boot_servers_to_group(created_group) members = self.api.get_server_group(created_group['id'])['members'] host = servers[0]['OS-EXT-SRV-ATTR:host'] for server in servers: self.assertIn(server['id'], members) self.assertEqual(host, server['OS-EXT-SRV-ATTR:host']) def test_boot_servers_with_affinity_overquota(self): # Tests that we check server group member quotas and clean up created # resources when we fail with OverQuota. self.flags(quota_server_group_members=1) # make sure we start with 0 servers servers = self.api.get_servers(detail=False) self.assertEqual(0, len(servers)) created_group = self.api.post_server_groups(self.affinity) ex = self.assertRaises(client.OpenStackApiException, self._boot_servers_to_group, created_group) self.assertEqual(403, ex.response.status_code) # _boot_servers_to_group creates 2 instances in the group in order, not # multiple servers in a single request. Since our quota is 1, the first # server create would pass, the second should fail, and we should be # left with 1 server and its 1 block device mapping. servers = self.api.get_servers(detail=False) self.assertEqual(1, len(servers)) ctxt = context.get_admin_context() servers = db.instance_get_all(ctxt) self.assertEqual(1, len(servers)) ctxt_mgr = db_api.get_context_manager(ctxt) with ctxt_mgr.reader.using(ctxt): bdms = db_api._block_device_mapping_get_query(ctxt).all() self.assertEqual(1, len(bdms)) self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid']) def test_boot_servers_with_affinity_no_valid_host(self): created_group = self.api.post_server_groups(self.affinity) # Using a big enough flavor to use up the resources on the host flavor = self.api.get_flavors()[2] self._boot_servers_to_group(created_group, flavor=flavor) # The third server cannot be booted as there is not enough resource # on the host where the first two servers were booted failed_server = self._boot_a_server_to_group(created_group, flavor=flavor, expected_status='ERROR') self.assertEqual('No valid host was found.
' 'There are not enough hosts available.', failed_server['fault']['message']) def test_boot_servers_with_anti_affinity(self): created_group = self.api.post_server_groups(self.anti_affinity) servers = self._boot_servers_to_group(created_group) members = self.api.get_server_group(created_group['id'])['members'] self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'], servers[1]['OS-EXT-SRV-ATTR:host']) for server in servers: self.assertIn(server['id'], members) def test_boot_server_with_anti_affinity_no_valid_host(self): created_group = self.api.post_server_groups(self.anti_affinity) self._boot_servers_to_group(created_group) # We have 2 computes so the third server won't fit into the same group failed_server = self._boot_a_server_to_group(created_group, expected_status='ERROR') self.assertEqual('No valid host was found. ' 'There are not enough hosts available.', failed_server['fault']['message']) def _rebuild_with_group(self, group): created_group = self.api.post_server_groups(group) servers = self._boot_servers_to_group(created_group) post = {'rebuild': {self._image_ref_parameter: '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}} self.api.post_server_action(servers[1]['id'], post) rebuilt_server = self._wait_for_state_change(servers[1], 'ACTIVE') self.assertEqual(post['rebuild'][self._image_ref_parameter], rebuilt_server.get('image')['id']) return [servers[0], rebuilt_server] def test_rebuild_with_affinity(self): untouched_server, rebuilt_server = self._rebuild_with_group( self.affinity) self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'], rebuilt_server['OS-EXT-SRV-ATTR:host']) def test_rebuild_with_anti_affinity(self): untouched_server, rebuilt_server = self._rebuild_with_group( self.anti_affinity) self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'], rebuilt_server['OS-EXT-SRV-ATTR:host']) def _migrate_with_group_no_valid_host(self, group): created_group = self.api.post_server_groups(group) servers = self._boot_servers_to_group(created_group) post = {'migrate': {}} ex = self.assertRaises(client.OpenStackApiException, self.admin_api.post_server_action, servers[1]['id'], post) self.assertEqual(400, ex.response.status_code) self.assertIn('No valid host found for cold migrate', ex.response.text) def test_migrate_with_group_no_valid_host(self): for group in [self.affinity, self.anti_affinity]: self._migrate_with_group_no_valid_host(group) def test_migrate_with_anti_affinity(self): # Start additional host to test migration with anti-affinity self.start_service('compute', host='host3') created_group = self.api.post_server_groups(self.anti_affinity) servers = self._boot_servers_to_group(created_group) post = {'migrate': {}} self.admin_api.post_server_action(servers[1]['id'], post) migrated_server = self._wait_for_state_change(servers[1], 'VERIFY_RESIZE') self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'], migrated_server['OS-EXT-SRV-ATTR:host']) def _get_compute_service_by_host_name(self, host_name): host = None if self.compute.host == host_name: host = self.compute elif self.compute2.host == host_name: host = self.compute2 else: raise AssertionError('host = %s does not found in ' 'existing hosts %s' % (host_name, str([self.compute.host, self.compute2.host]))) return host def test_evacuate_with_anti_affinity(self): created_group = self.api.post_server_groups(self.anti_affinity) servers = self._boot_servers_to_group(created_group) host = self._get_compute_service_by_host_name( servers[1]['OS-EXT-SRV-ATTR:host']) host.stop() # Need to wait service_down_time amount of seconds to ensure # 
nova considers the host down time.sleep(self._service_down_time) # Start additional host to test evacuation self.start_service('compute', host='host3') post = {'evacuate': {'onSharedStorage': False}} self.admin_api.post_server_action(servers[1]['id'], post) evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE') self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'], servers[0]['OS-EXT-SRV-ATTR:host']) host.start() def test_evacuate_with_anti_affinity_no_valid_host(self): created_group = self.api.post_server_groups(self.anti_affinity) servers = self._boot_servers_to_group(created_group) host = self._get_compute_service_by_host_name( servers[1]['OS-EXT-SRV-ATTR:host']) host.stop() # Need to wait service_down_time amount of seconds to ensure # nova considers the host down time.sleep(self._service_down_time) post = {'evacuate': {'onSharedStorage': False}} self.admin_api.post_server_action(servers[1]['id'], post) server_after_failed_evac = self._wait_for_state_change(servers[1], 'ACTIVE') # assert that after a failed evac the server active on the same host # as before self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'], servers[1]['OS-EXT-SRV-ATTR:host']) host.start() def test_evacuate_with_affinity_no_valid_host(self): created_group = self.api.post_server_groups(self.affinity) servers = self._boot_servers_to_group(created_group) host = self._get_compute_service_by_host_name( servers[1]['OS-EXT-SRV-ATTR:host']) host.stop() # Need to wait service_down_time amount of seconds to ensure # nova considers the host down time.sleep(self._service_down_time) post = {'evacuate': {'onSharedStorage': False}} self.admin_api.post_server_action(servers[1]['id'], post) server_after_failed_evac = self._wait_for_state_change(servers[1], 'ACTIVE') # assert that after a failed evac the server active on the same host # as before self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'], servers[1]['OS-EXT-SRV-ATTR:host']) host.start() class ServerGroupAffinityConfTest(ServerGroupTestBase): api_major_version = 'v2.1' # Load only anti-affinity filter so affinity will be missing _scheduler_default_filters = 'ServerGroupAntiAffinityFilter' @mock.patch('nova.scheduler.utils._SUPPORTS_AFFINITY', None) def test_affinity_no_filter(self): created_group = self.api.post_server_groups(self.affinity) failed_server = self._boot_a_server_to_group(created_group, expected_status='ERROR') self.assertEqual('ServerGroup policy is not supported: ' 'ServerGroupAffinityFilter not configured', failed_server['fault']['message']) self.assertEqual(400, failed_server['fault']['code']) class ServerGroupAntiAffinityConfTest(ServerGroupTestBase): api_major_version = 'v2.1' # Load only affinity filter so anti-affinity will be missing _scheduler_default_filters = 'ServerGroupAffinityFilter' @mock.patch('nova.scheduler.utils._SUPPORTS_ANTI_AFFINITY', None) def test_anti_affinity_no_filter(self): created_group = self.api.post_server_groups(self.anti_affinity) failed_server = self._boot_a_server_to_group(created_group, expected_status='ERROR') self.assertEqual('ServerGroup policy is not supported: ' 'ServerGroupAntiAffinityFilter not configured', failed_server['fault']['message']) self.assertEqual(400, failed_server['fault']['code']) class ServerGroupSoftAffinityConfTest(ServerGroupTestBase): api_major_version = 'v2.1' microversion = '2.15' soft_affinity = {'name': 'fake-name-4', 'policies': ['soft-affinity']} def _get_weight_classes(self): # Load only soft-anti-affinity weigher so affinity will be missing 
return ['nova.scheduler.weights.affinity.' 'ServerGroupSoftAntiAffinityWeigher'] @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY', None) def test_soft_affinity_no_filter(self): created_group = self.api.post_server_groups(self.soft_affinity) failed_server = self._boot_a_server_to_group(created_group, expected_status='ERROR') self.assertEqual('ServerGroup policy is not supported: ' 'ServerGroupSoftAffinityWeigher not configured', failed_server['fault']['message']) self.assertEqual(400, failed_server['fault']['code']) class ServerGroupSoftAntiAffinityConfTest(ServerGroupTestBase): api_major_version = 'v2.1' microversion = '2.15' soft_anti_affinity = {'name': 'fake-name-3', 'policies': ['soft-anti-affinity']} # Load only soft affinity filter so anti-affinity will be missing _scheduler_weight_classes = ['nova.scheduler.weights.affinity.' 'ServerGroupSoftAffinityWeigher'] def _get_weight_classes(self): # Load only soft affinity filter so anti-affinity will be missing return ['nova.scheduler.weights.affinity.' 'ServerGroupSoftAffinityWeigher'] @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY', None) def test_soft_anti_affinity_no_filter(self): created_group = self.api.post_server_groups(self.soft_anti_affinity) failed_server = self._boot_a_server_to_group(created_group, expected_status='ERROR') self.assertEqual('ServerGroup policy is not supported: ' 'ServerGroupSoftAntiAffinityWeigher not configured', failed_server['fault']['message']) self.assertEqual(400, failed_server['fault']['code']) class ServerGroupTestV21(ServerGroupTestV2): api_major_version = 'v2.1' def test_soft_affinity_not_supported(self): ex = self.assertRaises(client.OpenStackApiException, self.api.post_server_groups, {'name': 'fake-name-1', 'policies': ['soft-affinity']}) self.assertEqual(400, ex.response.status_code) self.assertIn('Invalid input', ex.response.text) self.assertIn('soft-affinity', ex.response.text) class ServerGroupTestV215(ServerGroupTestV2): api_major_version = 'v2.1' microversion = '2.15' soft_anti_affinity = {'name': 'fake-name-3', 'policies': ['soft-anti-affinity']} soft_affinity = {'name': 'fake-name-4', 'policies': ['soft-affinity']} def setUp(self): super(ServerGroupTestV215, self).setUp() soft_affinity_patcher = mock.patch( 'nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY') soft_anti_affinity_patcher = mock.patch( 'nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY') self.addCleanup(soft_affinity_patcher.stop) self.addCleanup(soft_anti_affinity_patcher.stop) self.mock_soft_affinity = soft_affinity_patcher.start() self.mock_soft_anti_affinity = soft_anti_affinity_patcher.start() self.mock_soft_affinity.return_value = None self.mock_soft_anti_affinity.return_value = None def _get_weight_classes(self): return ['nova.scheduler.weights.affinity.' 'ServerGroupSoftAffinityWeigher', 'nova.scheduler.weights.affinity.' 
'ServerGroupSoftAntiAffinityWeigher'] def test_evacuate_with_anti_affinity(self): created_group = self.api.post_server_groups(self.anti_affinity) servers = self._boot_servers_to_group(created_group) host = self._get_compute_service_by_host_name( servers[1]['OS-EXT-SRV-ATTR:host']) host.stop() # Need to wait service_down_time amount of seconds to ensure # nova considers the host down time.sleep(self._service_down_time) # Start additional host to test evacuation compute3 = self.start_service('compute', host='host3') post = {'evacuate': {}} self.admin_api.post_server_action(servers[1]['id'], post) evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE') self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'], servers[0]['OS-EXT-SRV-ATTR:host']) compute3.kill() host.start() def test_evacuate_with_anti_affinity_no_valid_host(self): created_group = self.api.post_server_groups(self.anti_affinity) servers = self._boot_servers_to_group(created_group) host = self._get_compute_service_by_host_name( servers[1]['OS-EXT-SRV-ATTR:host']) host.stop() # Need to wait service_down_time amount of seconds to ensure # nova considers the host down time.sleep(self._service_down_time) post = {'evacuate': {}} self.admin_api.post_server_action(servers[1]['id'], post) server_after_failed_evac = self._wait_for_state_change(servers[1], 'ACTIVE') # assert that after a failed evac the server active on the same host # as before self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'], servers[1]['OS-EXT-SRV-ATTR:host']) host.start() def test_evacuate_with_affinity_no_valid_host(self): created_group = self.api.post_server_groups(self.affinity) servers = self._boot_servers_to_group(created_group) host = self._get_compute_service_by_host_name( servers[1]['OS-EXT-SRV-ATTR:host']) host.stop() # Need to wait service_down_time amount of seconds to ensure # nova considers the host down time.sleep(self._service_down_time) post = {'evacuate': {}} self.admin_api.post_server_action(servers[1]['id'], post) server_after_failed_evac = self._wait_for_state_change(servers[1], 'ACTIVE') # assert that after a failed evac the server active on the same host # as before self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'], servers[1]['OS-EXT-SRV-ATTR:host']) host.start() def test_create_and_delete_groups(self): groups = [self.anti_affinity, self.affinity, self.soft_affinity, self.soft_anti_affinity] created_groups = [] for group in groups: created_group = self.api.post_server_groups(group) created_groups.append(created_group) self.assertEqual(group['name'], created_group['name']) self.assertEqual(group['policies'], created_group['policies']) self.assertEqual([], created_group['members']) self.assertEqual({}, created_group['metadata']) self.assertIn('id', created_group) group_details = self.api.get_server_group(created_group['id']) self.assertEqual(created_group, group_details) existing_groups = self.api.get_server_groups() self.assertIn(created_group, existing_groups) existing_groups = self.api.get_server_groups() self.assertEqual(len(groups), len(existing_groups)) for group in created_groups: self.api.delete_server_group(group['id']) existing_groups = self.api.get_server_groups() self.assertNotIn(group, existing_groups) def test_boot_servers_with_soft_affinity(self): created_group = self.api.post_server_groups(self.soft_affinity) servers = self._boot_servers_to_group(created_group) members = self.api.get_server_group(created_group['id'])['members'] self.assertEqual(2, len(servers)) 
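# With soft-affinity both servers are expected to land on the same host when capacity allows; unlike strict affinity this is a weigher preference rather than a hard scheduler filter.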
self.assertIn(servers[0]['id'], members) self.assertIn(servers[1]['id'], members) self.assertEqual(servers[0]['OS-EXT-SRV-ATTR:host'], servers[1]['OS-EXT-SRV-ATTR:host']) def test_boot_servers_with_soft_affinity_no_resource_on_first_host(self): created_group = self.api.post_server_groups(self.soft_affinity) # Using big enough flavor to use up the resources on the first host flavor = self.api.get_flavors()[2] servers = self._boot_servers_to_group(created_group, flavor) # The third server cannot be booted on the first host as there # is not enough resource there, but as opposed to the affinity policy # it will be booted on the other host, which has enough resources. third_server = self._boot_a_server_to_group(created_group, flavor=flavor) members = self.api.get_server_group(created_group['id'])['members'] hosts = [] for server in servers: hosts.append(server['OS-EXT-SRV-ATTR:host']) self.assertIn(third_server['id'], members) self.assertNotIn(third_server['OS-EXT-SRV-ATTR:host'], hosts) def test_boot_servers_with_soft_anti_affinity(self): created_group = self.api.post_server_groups(self.soft_anti_affinity) servers = self._boot_servers_to_group(created_group) members = self.api.get_server_group(created_group['id'])['members'] self.assertEqual(2, len(servers)) self.assertIn(servers[0]['id'], members) self.assertIn(servers[1]['id'], members) self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'], servers[1]['OS-EXT-SRV-ATTR:host']) def test_boot_servers_with_soft_anti_affinity_one_available_host(self): self.compute2.kill() created_group = self.api.post_server_groups(self.soft_anti_affinity) servers = self._boot_servers_to_group(created_group) members = self.api.get_server_group(created_group['id'])['members'] host = servers[0]['OS-EXT-SRV-ATTR:host'] for server in servers: self.assertIn(server['id'], members) self.assertEqual(host, server['OS-EXT-SRV-ATTR:host']) def test_rebuild_with_soft_affinity(self): untouched_server, rebuilt_server = self._rebuild_with_group( self.soft_affinity) self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'], rebuilt_server['OS-EXT-SRV-ATTR:host']) def test_rebuild_with_soft_anti_affinity(self): untouched_server, rebuilt_server = self._rebuild_with_group( self.soft_anti_affinity) self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'], rebuilt_server['OS-EXT-SRV-ATTR:host']) def _migrate_with_soft_affinity_policies(self, group): created_group = self.api.post_server_groups(group) servers = self._boot_servers_to_group(created_group) post = {'migrate': {}} self.admin_api.post_server_action(servers[1]['id'], post) migrated_server = self._wait_for_state_change(servers[1], 'VERIFY_RESIZE') return [migrated_server['OS-EXT-SRV-ATTR:host'], servers[0]['OS-EXT-SRV-ATTR:host']] def test_migrate_with_soft_affinity(self): migrated_server, other_server = ( self._migrate_with_soft_affinity_policies(self.soft_affinity)) self.assertNotEqual(migrated_server, other_server) def test_migrate_with_soft_anti_affinity(self): migrated_server, other_server = ( self._migrate_with_soft_affinity_policies(self.soft_anti_affinity)) self.assertEqual(migrated_server, other_server) def _evacuate_with_soft_anti_affinity_policies(self, group): created_group = self.api.post_server_groups(group) servers = self._boot_servers_to_group(created_group) host = self._get_compute_service_by_host_name( servers[1]['OS-EXT-SRV-ATTR:host']) host.stop() # Need to wait service_down_time amount of seconds to ensure # nova considers the host down time.sleep(self._service_down_time) post = {'evacuate': {}} 
self.admin_api.post_server_action(servers[1]['id'], post) evacuated_server = self._wait_for_state_change(servers[1], 'ACTIVE') # Note(gibi): need to get the server again as the state of the instance # goes to ACTIVE first then the host of the instance changes to the # new host later evacuated_server = self.admin_api.get_server(evacuated_server['id']) host.start() return [evacuated_server['OS-EXT-SRV-ATTR:host'], servers[0]['OS-EXT-SRV-ATTR:host']] def test_evacuate_with_soft_affinity(self): evacuated_server, other_server = ( self._evacuate_with_soft_anti_affinity_policies( self.soft_affinity)) self.assertNotEqual(evacuated_server, other_server) def test_evacuate_with_soft_anti_affinity(self): evacuated_server, other_server = ( self._evacuate_with_soft_anti_affinity_policies( self.soft_anti_affinity)) self.assertEqual(evacuated_server, other_server) nova-13.1.4/nova/tests/functional/regressions/0000775000567000056710000000000013064447471022530 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/regressions/test_bug_1541691.py0000664000567000056710000000417413064447152025632 0ustar jenkinsjenkins00000000000000# Copyright 2016 HPE, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import nova.scheduler.utils import nova.servicegroup from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture CONF = cfg.CONF class TestServerValidation(test.TestCase): REQUIRES_LOCKING = True microversion = None def setUp(self): super(TestServerValidation, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) self.api = api_fixture.api self.image_id = self.api.get_images()[0]['id'] self.flavor_id = self.api.get_flavors()[0]['id'] def test_name_validation(self): """Regression test for bug #1541691. The current jsonschema validation spits a giant wall of regex at you (about 500k characters). This is not useful to determine why your request actually failed. Ensure that once we fix this it doesn't regress. """ server = dict(name='server1 ', imageRef=self.image_id, flavorRef=self.flavor_id) server_args = {'server': server} self.assertRaises(client.OpenStackApiException, self.api.post_server, server_args) nova-13.1.4/nova/tests/functional/regressions/test_bug_1522536.py0000664000567000056710000000505013064447152025621 0ustar jenkinsjenkins00000000000000# Copyright 2016 HPE, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import nova.scheduler.utils import nova.servicegroup from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client from nova.tests.unit import cast_as_call import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture CONF = cfg.CONF class TestServerGet(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(TestServerGet, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.api = api_fixture.api # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) self.start_service('conductor', manager=CONF.conductor.manager) self.flags(scheduler_driver='chance_scheduler') self.start_service('scheduler') self.network = self.start_service('network') self.compute = self.start_service('compute') self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) self.image_id = self.api.get_images()[0]['id'] self.flavor_id = self.api.get_flavors()[0]['id'] def test_id_overlap(self): """Regression test for bug #1522536. Before fixing this bug, getting a numeric id caused a 500 error because it treated the numeric value as the db index, fetched the server, but then processing of extensions blew up. Since we have fixed this bug it returns a 404, which is expected. In future a 400 might be more appropriate. """ server = dict(name='server1', imageRef=self.image_id, flavorRef=self.flavor_id) self.api.post_server({'server': server}) self.assertRaises(client.OpenStackApiNotFoundException, self.api.get_server, 1) nova-13.1.4/nova/tests/functional/regressions/test_bug_1558866.py0000664000567000056710000000606513064447152025647 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from oslo_config import cfg from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client as api_client from nova.tests.unit.image import fake as fake_image from nova.tests.unit import policy_fixture CONF = cfg.CONF CONF.import_opt('null_kernel', 'nova.compute.api') class TestServerGet(test.TestCase): def setUp(self): super(TestServerGet, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.api = api_fixture.api # the image fake backend needed for image discovery image_service = fake_image.stub_out_image_service(self) self.addCleanup(fake_image.FakeImageService_reset) # NOTE(mriedem): This image has an invalid architecture metadata value # and is used for negative testing in the functional stack. timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) image = {'id': 'c456eb30-91d7-4f43-8f46-2efd9eccd744', 'name': 'fake-image-invalid-arch', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'raw', 'disk_format': 'raw', 'size': '25165824', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel, 'architecture': 'x64'}} self.image_id = image_service.create(None, image)['id'] self.flavor_id = self.api.get_flavors()[0]['id'] def test_boot_server_with_invalid_image_meta(self): """Regression test for bug #1558866. Glance allows you to provide any architecture value for image meta properties but nova validates the image metadata against the nova.compute.arch.ALL values during the conversion to the ImageMeta object. This test ensures we get a 400 back in that case rather than a 500. """ server = dict(name='server1', imageRef=self.image_id, flavorRef=self.flavor_id) ex = self.assertRaises(api_client.OpenStackApiException, self.api.post_server, {'server': server}) self.assertEqual(400, ex.response.status_code) nova-13.1.4/nova/tests/functional/regressions/__init__.py0000664000567000056710000000000013064447140024620 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/regressions/README.rst0000664000567000056710000000146713064447140024220 0ustar jenkinsjenkins00000000000000================================ Tests for Specific Regressions ================================ When we have a bug reported by end users that we can write a full stack reproduction for, we should write one, and we should keep a regression test for that bug in our tree. It can be deleted at some future date if needed, but largely should not be changed. Writing Regression Tests ======================== - These should be full stack tests which inherit from nova.test.TestCase directly. (This is to prevent coupling with other tests). - They should set up a full stack cloud in their setUp via fixtures - They should each live in a file which is named test_bug_######.py Writing Tests Before the Bug is Fixed ===================================== TODO describe writing and landing tests before the bug is fixed as a reproduce. nova-13.1.4/nova/tests/functional/regressions/test_bug_1548980.py0000664000567000056710000000675013064447152025644 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_config import cfg import nova.scheduler.utils import nova.servicegroup from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client from nova.tests.unit import cast_as_call import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture CONF = cfg.CONF class TestServerGet(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(TestServerGet, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) # The non-admin API client is fine to stay at 2.1 since it just creates # and deletes the server. self.api = api_fixture.api self.admin_api = api_fixture.admin_api # The admin API client needs to be at microversion 2.16 to exhibit the # regression. self.admin_api.microversion = '2.16' # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) self.start_service('conductor', manager=CONF.conductor.manager) self.flags(scheduler_driver='chance_scheduler') self.start_service('scheduler') self.network = self.start_service('network') self.compute = self.start_service('compute') self.consoleauth = self.start_service('consoleauth') self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) self.image_id = self.api.get_images()[0]['id'] self.flavor_id = self.api.get_flavors()[0]['id'] def test_list_deleted_instances(self): """Regression test for bug #1548980. Before fixing this bug, listing deleted instances returned a 404 because lazy-loading services from a deleted instance failed. Now we should be able to list the deleted instance and the host_state attribute should be "". """ server = dict(name='server1', imageRef=self.image_id, flavorRef=self.flavor_id) server = self.api.post_server({'server': server}) self.api.delete_server(server['id']) # Wait 30 seconds for it to be gone. for x in range(30): try: self.api.get_server(server['id']) time.sleep(1) except client.OpenStackApiNotFoundException: break else: self.fail('Timed out waiting to delete server: %s' % server['id']) servers = self.admin_api.get_servers(search_opts={'deleted': 1}) self.assertEqual(1, len(servers)) self.assertEqual(server['id'], servers[0]['id']) # host_status is returned in the 2.16 microversion and since the server # is deleted it should be the empty string self.assertEqual(0, len(servers[0]['host_status'])) nova-13.1.4/nova/tests/functional/regressions/test_bug_1552888.py0000664000567000056710000000275313064447152025645 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import policy_fixture class TestAggregateCreation(test.TestCase): def setUp(self): super(TestAggregateCreation, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.admin_api = api_fixture.admin_api def test_name_validation(self): """Regression test for bug #1552888. The aggregate API currently accepts a null param for the availability zone; a change to that validation might break commands such as 'nova aggregate create foo'. This test ensures that kind of change does not slip through. """ body = {"aggregate": {"name": "foo", "availability_zone": None}} # This should succeed self.admin_api.api_post('/os-aggregates', body) nova-13.1.4/nova/tests/functional/test_extensions.py0000664000567000056710000000336413064447152023777 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging # Import extensions to pull in osapi_compute_extension CONF option used below. from nova.tests.functional import api_paste_fixture from nova.tests.functional import integrated_helpers CONF = cfg.CONF LOG = logging.getLogger(__name__) class ExtensionsTest(integrated_helpers._IntegratedTestBase): api_major_version = 'v2' def setUp(self): self.useFixture(api_paste_fixture.ApiPasteLegacyV2Fixture()) super(ExtensionsTest, self).setUp() def _get_flags(self): f = super(ExtensionsTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.tests.unit.api.openstack.compute.legacy_v2.extensions.' 'foxinsocks.Foxinsocks') return f def test_get_foxnsocks(self): # Simple check that fox-n-socks works. response = self.api.api_request('/foxnsocks') foxnsocks = response.content LOG.debug("foxnsocks: %s" % foxnsocks) self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks) nova-13.1.4/nova/tests/functional/api_sample_tests/0000775000567000056710000000000013064447471023521 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/test_flavor_manage.py0000664000567000056710000000513113064447152027727 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
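# NOTE (editorial illustration, not Nova code): the bug #1552888 test above protects the fact that "availability_zone": None is accepted when creating an aggregate. Below is a minimal sketch of a request schema that keeps null allowed, written directly against the jsonschema library; Nova's real request schemas live elsewhere in the tree, so the schema here is an assumption for illustration only.
import jsonschema

aggregate_create_schema = {
    'type': 'object',
    'properties': {
        'aggregate': {
            'type': 'object',
            'properties': {
                'name': {'type': 'string', 'minLength': 1},
                # Null must remain allowed here, otherwise requests that omit
                # a zone (e.g. 'nova aggregate-create foo') would start failing.
                'availability_zone': {'type': ['string', 'null']},
            },
            'required': ['name'],
        },
    },
    'required': ['aggregate'],
}

# Validates without raising, mirroring the success the functional test expects.
jsonschema.validate(
    {'aggregate': {'name': 'foo', 'availability_zone': None}},
    aggregate_create_schema)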
from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FlavorManageSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = 'flavor-manage' def _get_flags(self): f = super(FlavorManageSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavormanage.' 'Flavormanage') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_disabled.' 'Flavor_disabled') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_access.' 'Flavor_access') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavorextradata.' 'Flavorextradata') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_swap.' 'Flavor_swap') return f def _create_flavor(self): """Create a flavor.""" subs = { 'flavor_id': '10', 'flavor_name': "test_flavor" } response = self._do_post("flavors", "flavor-create-post-req", subs) self._verify_response("flavor-create-post-resp", subs, response, 200) # TODO(sdague): remove duplication def test_create_flavor(self): # Get api sample to create a flavor. self._create_flavor() def test_delete_flavor(self): # Get api sample to delete a flavor. self._create_flavor() response = self._do_delete("flavors/10") self.assertEqual(202, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_used_limits.py0000664000567000056710000000510213064447152027445 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class UsedLimitsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-used-limits" extra_extensions_to_load = ["limits"] def setUp(self): super(UsedLimitsSamplesJsonTest, self).setUp() # NOTE(park): We have to separate the template files between V2 # and V2.1 as the response are different. self.template = 'usedlimits-get-resp' if self._legacy_v2_code: self.template = 'v2-usedlimits-get-resp' def _get_flags(self): f = super(UsedLimitsSamplesJsonTest, self)._get_flags() if self._legacy_v2_code: f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( "nova.api.openstack.compute." "legacy_v2.contrib.server_group_quotas." "Server_group_quotas") f['osapi_compute_extension'].append( "nova.api.openstack.compute." "legacy_v2.contrib.used_limits.Used_limits") f['osapi_compute_extension'].append( "nova.api.openstack.compute." "legacy_v2.contrib.used_limits_for_admin." 
"Used_limits_for_admin") return f def test_get_used_limits(self): # Get api sample to used limits. response = self._do_get('limits') self._verify_response(self.template, {}, response, 200) def test_get_used_limits_for_admin(self): # TODO(sdague): if we split the admin tests out the whole # class doesn't need admin api enabled. tenant_id = 'openstack' response = self._do_get('limits?tenant_id=%s' % tenant_id) self._verify_response(self.template, {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/api_sample_base.py0000664000567000056710000001164313064447152027200 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg import testscenarios from nova.api.openstack import API_V21_CORE_EXTENSIONS # noqa from nova import test from nova.tests.functional import api_paste_fixture from nova.tests.functional import api_samples_test_base from nova.tests.unit import fake_network from nova.tests.unit import fake_utils CONF = cfg.CONF CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common') CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') # API samples heavily uses testscenarios. This allows us to use the # same tests, with slight variations in configuration to ensure our # various ways of calling the API are compatible. Testscenarios works # through the class level ``scenarios`` variable. It is an array of # tuples where the first value in each tuple is an arbitrary name for # the scenario (should be unique), and the second item is a dictionary # of attributes to change in the class for the test. # # By default we're running scenarios for 3 situations # # - Hitting the default /v2 endpoint # # - Hitting the default /v2.1 endpoint # # - Hitting the /v2 but fixing the paste pipeline so that it uses the # legacy v2 code. This requires a fixture. # # Things we need to set: # # - api_major_version - what version of the API we should be hitting # # - microversion - what API microversion should be used # # - _additional_fixtures - any additional fixtures need # # - _legacy_v2_code - True/False if we are using the legacy v2 code # stack. Sadly, a few tests really care about this. 
# # NOTE(sdague): if you want to build a test that only tests specific # microversions, then replace the ``scenarios`` class variable in that # test class with something like: # # [("v2_11", {'api_major_version': 'v2.1', 'microversion': '2.11'})] class ApiSampleTestBaseV21(testscenarios.WithScenarios, api_samples_test_base.ApiSampleTestBase): api_major_version = 'v2' # any additional fixtures needed for this scenario _additional_fixtures = [] sample_dir = None extra_extensions_to_load = None _legacy_v2_code = False _project_id = True scenarios = [ # test v2 with the v2.1 compatibility stack ('v2', { 'api_major_version': 'v2'}), # test v2.1 base microversion ('v2_1', { 'api_major_version': 'v2.1'}), # test v2 with the v2 legacy code ('v2legacy', { 'api_major_version': 'v2', '_legacy_v2_code': True, '_additional_fixtures': [ api_paste_fixture.ApiPasteLegacyV2Fixture]}), # test v2.18 code without project id ('v2_1_noproject_id', { 'api_major_version': 'v2.1', '_project_id': False, '_additional_fixtures': [ api_paste_fixture.ApiPasteNoProjectId]}) ] def setUp(self): self.flags(use_ipv6=False, osapi_compute_link_prefix=self._get_host(), osapi_glance_link_prefix=self._get_glance_host()) if not self.all_extensions: self.flags(osapi_compute_extension=[]) # Set the whitelist to ensure only the extensions we are # interested in are loaded so the api samples don't include # data from extensions we are not interested in whitelist = API_V21_CORE_EXTENSIONS.copy() if self.extension_name: whitelist.add(self.extension_name) if self.extra_extensions_to_load: whitelist.update(set(self.extra_extensions_to_load)) CONF.set_override('extensions_whitelist', whitelist, 'osapi_v21') # load any additional fixtures specified by the scenario for fix in self._additional_fixtures: self.useFixture(fix()) # super class call is delayed here so that we have the right # paste and conf before loading all the services, as we can't # change these later. super(ApiSampleTestBaseV21, self).setUp() self.useFixture(test.SampleNetworks(host=self.network.host)) fake_network.stub_compute_with_ips(self.stubs) fake_utils.stub_out_utils_spawn_n(self.stubs) # this is used to generate sample docs self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None nova-13.1.4/nova/tests/functional/api_sample_tests/test_security_group_default_rules.py0000664000567000056710000000450613064447152033134 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
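# NOTE (editorial illustration, not Nova code): ApiSampleTestBaseV21 above relies on testscenarios to run every sample test once per API flavour. A minimal, self-contained sketch of that mechanism, assuming testscenarios and testtools are installed (both are test dependencies here):
import testscenarios
import testtools


class ExampleScenarioTest(testscenarios.WithScenarios, testtools.TestCase):

    # Each (name, attribute-dict) pair produces a separate copy of every test
    # method, with the attributes applied to the test instance for that run.
    scenarios = [
        ('v2', {'api_major_version': 'v2'}),
        ('v2_1', {'api_major_version': 'v2.1'}),
    ]

    def test_version_attribute_is_set(self):
        # Runs twice: once with 'v2' and once with 'v2.1'.
        self.assertIn(self.api_major_version, ('v2', 'v2.1'))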
from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class SecurityGroupDefaultRulesSampleJsonTest( api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = 'os-security-group-default-rules' def _get_flags(self): f = super(SecurityGroupDefaultRulesSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.security_group_default_rules.' 'Security_group_default_rules') return f def test_security_group_default_rules_create(self): response = self._do_post('os-security-group-default-rules', 'security-group-default-rules-create-req', {}) self._verify_response('security-group-default-rules-create-resp', {}, response, 200) def test_security_group_default_rules_list(self): self.test_security_group_default_rules_create() response = self._do_get('os-security-group-default-rules') self._verify_response('security-group-default-rules-list-resp', {}, response, 200) def test_security_group_default_rules_show(self): self.test_security_group_default_rules_create() rule_id = '1' response = self._do_get('os-security-group-default-rules/%s' % rule_id) self._verify_response('security-group-default-rules-show-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_server_external_events.py0000664000567000056710000000373613064447152031733 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServerExternalEventsSamplesJsonTest(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "os-server-external-events" def _get_flags(self): f = super(ServerExternalEventsSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.server_external_events.' 'Server_external_events') return f def setUp(self): """setUp Method for AdminActions api samples extension This method creates the server that will be used in each tests """ super(ServerExternalEventsSamplesJsonTest, self).setUp() self.uuid = self._post_server() def test_create_event(self): subs = { 'uuid': self.uuid, 'name': 'network-changed', 'status': 'completed', 'tag': 'foo', } response = self._do_post('os-server-external-events', 'event-create-req', subs) self._verify_response('event-create-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_extended_availability_zone.py0000664000567000056710000000445113064447152032517 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ExtendedAvailabilityZoneJsonTests(test_servers.ServersSampleBase): extension_name = "os-extended-availability-zone" def _get_flags(self): f = super(ExtendedAvailabilityZoneJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_availability_zone.' 'Extended_availability_zone') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') return f def test_show(self): uuid = self._post_server() response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_detail(self): self._post_server() response = self._do_get('servers/detail') subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_floating_ips.py0000664000567000056710000001031213064447152027601 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova import context from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('default_floating_pool', 'nova.network.floating_ips') CONF.import_opt('public_interface', 'nova.network.linux_net') CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FloatingIpsTest(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-floating-ips" def _get_flags(self): f = super(FloatingIpsTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.floating_ips.Floating_ips') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 
'contrib.extended_floating_ips.Extended_floating_ips') return f def setUp(self): super(FloatingIpsTest, self).setUp() pool = CONF.default_floating_pool interface = CONF.public_interface self.ip_pool = [ { 'address': "10.10.10.1", 'pool': pool, 'interface': interface }, { 'address': "10.10.10.2", 'pool': pool, 'interface': interface }, { 'address': "10.10.10.3", 'pool': pool, 'interface': interface }, ] self.compute.db.floating_ip_bulk_create( context.get_admin_context(), self.ip_pool) def tearDown(self): self.compute.db.floating_ip_bulk_destroy( context.get_admin_context(), self.ip_pool) super(FloatingIpsTest, self).tearDown() def test_floating_ips_list_empty(self): response = self._do_get('os-floating-ips') self._verify_response('floating-ips-list-empty-resp', {}, response, 200) def test_floating_ips_list(self): self._do_post('os-floating-ips', 'floating-ips-create-nopool-req', {}) self._do_post('os-floating-ips', 'floating-ips-create-nopool-req', {}) response = self._do_get('os-floating-ips') self._verify_response('floating-ips-list-resp', {}, response, 200) def test_floating_ips_create_nopool(self): response = self._do_post('os-floating-ips', 'floating-ips-create-nopool-req', {}) self._verify_response('floating-ips-create-resp', {}, response, 200) def test_floating_ips_create(self): response = self._do_post('os-floating-ips', 'floating-ips-create-req', {"pool": CONF.default_floating_pool}) self._verify_response('floating-ips-create-resp', {}, response, 200) def test_floating_ips_get(self): self.test_floating_ips_create() # NOTE(sdague): the first floating ip will always have 1 as an id, # but it would be better if we could get this from the create response = self._do_get('os-floating-ips/%d' % 1) self._verify_response('floating-ips-get-resp', {}, response, 200) def test_floating_ips_delete(self): self.test_floating_ips_create() response = self._do_delete('os-floating-ips/%d' % 1) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_deferred_delete.py0000664000567000056710000000404313064447152030231 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase): extension_name = "os-deferred-delete" def _get_flags(self): f = super(DeferredDeleteSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.deferred_delete.' 
'Deferred_delete') return f def setUp(self): super(DeferredDeleteSampleJsonTests, self).setUp() self.flags(reclaim_instance_interval=1) def test_restore(self): uuid = self._post_server() self._do_delete('servers/%s' % uuid) response = self._do_post('servers/%s/action' % uuid, 'restore-post-req', {}) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) def test_force_delete(self): uuid = self._post_server() self._do_delete('servers/%s' % uuid) response = self._do_post('servers/%s/action' % uuid, 'force-delete-post-req', {}) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_networks.py0000664000567000056710000000666113064447152027013 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.api.openstack.compute import test_networks CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-networks" def _get_flags(self): f = super(NetworksJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.os_networks.Os_networks') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 
'contrib.extended_networks.Extended_networks') return f def setUp(self): super(NetworksJsonTests, self).setUp() fake_network_api = test_networks.FakeNetworkAPI() self.stub_out("nova.network.api.API.get_all", fake_network_api.get_all) self.stub_out("nova.network.api.API.get", fake_network_api.get) self.stub_out("nova.network.api.API.associate", fake_network_api.associate) self.stub_out("nova.network.api.API.delete", fake_network_api.delete) self.stub_out("nova.network.api.API.create", fake_network_api.create) self.stub_out("nova.network.api.API.add_network_to_project", fake_network_api.add_network_to_project) def test_network_list(self): response = self._do_get('os-networks') self._verify_response('networks-list-resp', {}, response, 200) def test_network_disassociate(self): uuid = test_networks.FAKE_NETWORKS[0]['uuid'] response = self._do_post('os-networks/%s/action' % uuid, 'networks-disassociate-req', {}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def test_network_show(self): uuid = test_networks.FAKE_NETWORKS[0]['uuid'] response = self._do_get('os-networks/%s' % uuid) self._verify_response('network-show-resp', {}, response, 200) def test_network_create(self): response = self._do_post("os-networks", 'network-create-req', {}) self._verify_response('network-create-resp', {}, response, 200) def test_network_add(self): response = self._do_post("os-networks/add", 'network-add-req', {}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def test_network_delete(self): response = self._do_delete('os-networks/always_delete') self.assertEqual(202, response.status_code) self.assertEqual("", response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_server_usage.py0000664000567000056710000000460713064447152027627 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServerUsageSampleJsonTest(test_servers.ServersSampleBase): extension_name = 'os-server-usage' def _get_flags(self): f = super(ServerUsageSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.server_usage.Server_usage') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 
'Extended_ips_mac') return f def setUp(self): """setUp method for server usage.""" super(ServerUsageSampleJsonTest, self).setUp() self.uuid = self._post_server() def test_show(self): response = self._do_get('servers/%s' % self.uuid) subs = {} subs['id'] = self.uuid subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_details(self): response = self._do_get('servers/detail') subs = {} subs['id'] = self.uuid subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_attach_interfaces.py0000664000567000056710000001642713064447152030607 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova import exception from nova.network import api as network_api from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit import fake_network_cache_model CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AttachInterfacesSampleJsonTest(test_servers.ServersSampleBase): extension_name = 'os-attach-interfaces' def _get_flags(self): f = super(AttachInterfacesSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 
'attach_interfaces.Attach_interfaces') return f def setUp(self): super(AttachInterfacesSampleJsonTest, self).setUp() def fake_list_ports(self, *args, **kwargs): uuid = kwargs.get('device_id', None) if not uuid: raise exception.InstanceNotFound(instance_id=None) port_data = { "id": "ce531f90-199f-48c0-816c-13e38010b442", "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "admin_state_up": True, "status": "ACTIVE", "mac_address": "fa:16:3e:4c:2c:30", "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "device_id": uuid, } ports = {'ports': [port_data]} return ports def fake_show_port(self, context, port_id=None): if not port_id: raise exception.PortNotFound(port_id=None) port_data = { "id": port_id, "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "admin_state_up": True, "status": "ACTIVE", "mac_address": "fa:16:3e:4c:2c:30", "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7', } port = {'port': port_data} return port def fake_attach_interface(self, context, instance, network_id, port_id, requested_ip='192.168.1.3'): if not network_id: network_id = "fake_net_uuid" if not port_id: port_id = "fake_port_uuid" vif = fake_network_cache_model.new_vif() vif['id'] = port_id vif['network']['id'] = network_id vif['network']['subnets'][0]['ips'][0] = requested_ip return vif def fake_detach_interface(self, context, instance, port_id): pass self.stub_out('nova.network.api.API.list_ports', fake_list_ports) self.stub_out('nova.network.api.API.show_port', fake_show_port) self.stub_out('nova.compute.api.API.attach_interface', fake_attach_interface) self.stub_out('nova.compute.api.API.detach_interface', fake_detach_interface) self.flags(timeout=30, group='neutron') def generalize_subs(self, subs, vanilla_regexes): subs['subnet_id'] = vanilla_regexes['uuid'] subs['net_id'] = vanilla_regexes['uuid'] subs['port_id'] = vanilla_regexes['uuid'] subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}' subs['ip_address'] = vanilla_regexes['ip'] return subs def test_list_interfaces(self): instance_uuid = self._post_server() response = self._do_get('servers/%s/os-interface' % instance_uuid) subs = { 'ip_address': '192.168.1.3', 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef', 'mac_addr': 'fa:16:3e:4c:2c:30', 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6', 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442', 'port_state': 'ACTIVE' } self._verify_response('attach-interfaces-list-resp', subs, response, 200) def _stub_show_for_instance(self, instance_uuid, port_id): show_port = network_api.API().show_port(None, port_id) show_port['port']['device_id'] = instance_uuid self.stub_out('nova.network.api.API.show_port', lambda *a, **k: show_port) def test_show_interfaces(self): instance_uuid = self._post_server() port_id = 'ce531f90-199f-48c0-816c-13e38010b442' self._stub_show_for_instance(instance_uuid, port_id) response = self._do_get('servers/%s/os-interface/%s' % (instance_uuid, port_id)) subs = { 'ip_address': '192.168.1.3', 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef', 'mac_addr': 'fa:16:3e:4c:2c:30', 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6', 'port_id': port_id, 'port_state': 'ACTIVE' } self._verify_response('attach-interfaces-show-resp', subs, response, 200) def test_create_interfaces(self, instance_uuid=None): if instance_uuid is None: instance_uuid = self._post_server() subs = { 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6', 'port_id': 
'ce531f90-199f-48c0-816c-13e38010b442', 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef', 'ip_address': '192.168.1.3', 'port_state': 'ACTIVE', 'mac_addr': 'fa:16:3e:4c:2c:30', } self._stub_show_for_instance(instance_uuid, subs['port_id']) response = self._do_post('servers/%s/os-interface' % instance_uuid, 'attach-interfaces-create-req', subs) self._verify_response('attach-interfaces-create-resp', subs, response, 200) def test_delete_interfaces(self): instance_uuid = self._post_server() port_id = 'ce531f90-199f-48c0-816c-13e38010b442' response = self._do_delete('servers/%s/os-interface/%s' % (instance_uuid, port_id)) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_remote_consoles.py0000664000567000056710000001203613064447152030330 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ConsolesSampleJsonTests(test_servers.ServersSampleBase): microversion = None extension_name = "os-remote-consoles" def _get_flags(self): f = super(ConsolesSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.consoles.Consoles') return f def setUp(self): super(ConsolesSampleJsonTests, self).setUp() self.api.microversion = self.microversion self.flags(enabled=True, group='vnc') self.flags(enabled=True, group='spice') self.flags(enabled=True, group='rdp') self.flags(enabled=True, group='serial_console') def test_get_vnc_console(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, 'get-vnc-console-post-req', {'action': 'os-getVNCConsole'}) subs = {"url": "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"} self._verify_response('get-vnc-console-post-resp', subs, response, 200) def test_get_spice_console(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, 'get-spice-console-post-req', {'action': 'os-getSPICEConsole'}) subs = {"url": "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"} self._verify_response('get-spice-console-post-resp', subs, response, 200) def test_get_rdp_console(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, 'get-rdp-console-post-req', {'action': 'os-getRDPConsole'}) subs = {"url": "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"} self._verify_response('get-rdp-console-post-resp', subs, response, 200) def test_get_serial_console(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, 'get-serial-console-post-req', {'action': 'os-getSerialConsole'}) subs = {"url": "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"} 
self._verify_response('get-serial-console-post-resp', subs, response, 200) class ConsolesV26SampleJsonTests(test_servers.ServersSampleBase): microversion = '2.6' extension_name = "os-remote-consoles" # NOTE(gmann): microversion tests do not need to run for v2 API # so defining scenarios only for v2.6 which will run the original tests # by appending '(v2_6)' in test_id. scenarios = [('v2_6', {'api_major_version': 'v2.1'})] def setUp(self): super(ConsolesV26SampleJsonTests, self).setUp() self.http_regex = "(https?://)([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*" def test_create_console(self): uuid = self._post_server() body = {'protocol': 'vnc', 'type': 'novnc'} response = self._do_post('servers/%s/remote-consoles' % uuid, 'create-vnc-console-req', body) subs = {"url": self.http_regex} self._verify_response('create-vnc-console-resp', subs, response, 200) class ConsolesV28SampleJsonTests(test_servers.ServersSampleBase): extension_name = "os-remote-consoles" microversion = '2.8' scenarios = [('v2_8', {'api_major_version': 'v2.1'})] def setUp(self): super(ConsolesV28SampleJsonTests, self).setUp() self.http_regex = "(https?://)([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*" self.flags(enabled=True, group='mks') def test_create_mks_console(self): uuid = self._post_server() body = {'protocol': 'mks', 'type': 'webmks'} response = self._do_post('servers/%s/remote-consoles' % uuid, 'create-mks-console-req', body) subs = {"url": self.http_regex} self._verify_response('create-mks-console-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_availability_zone.py0000664000567000056710000000332013064447152030631 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import nova.conf from nova.tests.functional.api_sample_tests import test_servers CONF = nova.conf.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AvailabilityZoneJsonTest(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "os-availability-zone" def _get_flags(self): f = super(AvailabilityZoneJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.availability_zone.' 'Availability_zone') return f def test_availability_zone_list(self): response = self._do_get('os-availability-zone') self._verify_response('availability-zone-list-resp', {}, response, 200) def test_availability_zone_detail(self): response = self._do_get('os-availability-zone/detail') self._verify_response('availability-zone-detail-resp', {}, response, 200) def test_availability_zone_post(self): self._post_server() nova-13.1.4/nova/tests/functional/api_sample_tests/test_security_groups.py0000664000567000056710000001535613064447152030406 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers import nova.tests.functional.api_samples_test_base as astb CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') def fake_get(*args, **kwargs): nova_group = {} nova_group['id'] = 1 nova_group['description'] = 'default' nova_group['name'] = 'default' nova_group['project_id'] = astb.PROJECT_ID nova_group['rules'] = [] return nova_group def fake_get_instances_security_groups_bindings(self, context, servers, detailed=False): result = {} for s in servers: result[s.get('id')] = [{'name': 'test'}] return result def fake_add_to_instance(self, context, instance, security_group_name): pass def fake_remove_from_instance(self, context, instance, security_group_name): pass def fake_list(self, context, names=None, ids=None, project=None, search_opts=None): return [fake_get()] def fake_get_instance_security_groups(self, context, instance_uuid, detailed=False): return [fake_get()] def fake_create_security_group(self, context, name, description): return fake_get() class SecurityGroupsJsonTest(test_servers.ServersSampleBase): extension_name = 'os-security-groups' def _get_flags(self): f = super(SecurityGroupsJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.security_groups.' 'Security_groups') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') return f def setUp(self): self.flags(security_group_api=('neutron')) super(SecurityGroupsJsonTest, self).setUp() path = 'nova.network.security_group.neutron_driver.SecurityGroupAPI.' 
self.stub_out(path + 'get', fake_get) self.stub_out(path + 'get_instances_security_groups_bindings', fake_get_instances_security_groups_bindings) self.stub_out(path + 'add_to_instance', fake_add_to_instance) self.stub_out(path + 'remove_from_instance', fake_remove_from_instance) self.stub_out(path + 'list', fake_list) self.stub_out(path + 'get_instance_security_groups', fake_get_instance_security_groups) self.stub_out(path + 'create_security_group', fake_create_security_group) def test_server_create(self): self._post_server(use_common_server_api_samples=False) def test_server_get(self): uuid = self._post_server(use_common_server_api_samples=False) response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_server_detail(self): self._post_server(use_common_server_api_samples=False) response = self._do_get('servers/detail') subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200) def _get_create_subs(self): return { 'group_name': 'default', "description": "default", } def _create_security_group(self): subs = self._get_create_subs() return self._do_post('os-security-groups', 'security-group-post-req', subs) def _add_group(self, uuid): subs = { 'group_name': 'test' } return self._do_post('servers/%s/action' % uuid, 'security-group-add-post-req', subs) def test_security_group_create(self): response = self._create_security_group() subs = self._get_create_subs() self._verify_response('security-groups-create-resp', subs, response, 200) def test_security_groups_list(self): # Get api sample of security groups get list request. response = self._do_get('os-security-groups') self._verify_response('security-groups-list-get-resp', {}, response, 200) def test_security_groups_get(self): # Get api sample of security groups get request. security_group_id = '11111111-1111-1111-1111-111111111111' response = self._do_get('os-security-groups/%s' % security_group_id) self._verify_response('security-groups-get-resp', {}, response, 200) def test_security_groups_list_server(self): # Get api sample of security groups for a specific server. uuid = self._post_server(use_common_server_api_samples=False) response = self._do_get('servers/%s/os-security-groups' % uuid) self._verify_response('server-security-groups-list-resp', {}, response, 200) def test_security_groups_add(self): self._create_security_group() uuid = self._post_server(use_common_server_api_samples=False) response = self._add_group(uuid) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) def test_security_groups_remove(self): self._create_security_group() uuid = self._post_server(use_common_server_api_samples=False) self._add_group(uuid) subs = { 'group_name': 'test' } response = self._do_post('servers/%s/action' % uuid, 'security-group-remove-post-req', subs) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_instance_usage_audit_log.py0000664000567000056710000000360113064447152032145 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class InstanceUsageAuditLogJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-instance-usage-audit-log" def _get_flags(self): f = super(InstanceUsageAuditLogJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.instance_usage_audit_log.' 'Instance_usage_audit_log') return f def test_show_instance_usage_audit_log(self): response = self._do_get('os-instance_usage_audit_log/%s' % urllib.quote('2012-07-05 10:00:00')) self._verify_response('inst-usage-audit-log-show-get-resp', {}, response, 200) def test_index_instance_usage_audit_log(self): response = self._do_get('os-instance_usage_audit_log') self._verify_response('inst-usage-audit-log-index-get-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_shelve.py0000664000567000056710000000435013064447152026416 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('shelved_offload_time', 'nova.compute.manager') CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ShelveJsonTest(test_servers.ServersSampleBase): extension_name = "os-shelve" def _get_flags(self): f = super(ShelveJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.shelve.Shelve') return f def setUp(self): super(ShelveJsonTest, self).setUp() # Don't offload instance, so we can test the offload call. 
CONF.set_override('shelved_offload_time', -1) def _test_server_action(self, uuid, template, action): response = self._do_post('servers/%s/action' % uuid, template, {'action': action}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def test_shelve(self): uuid = self._post_server() self._test_server_action(uuid, 'os-shelve', 'shelve') def test_shelve_offload(self): uuid = self._post_server() self._test_server_action(uuid, 'os-shelve', 'shelve') self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload') def test_unshelve(self): uuid = self._post_server() self._test_server_action(uuid, 'os-shelve', 'shelve') self._test_server_action(uuid, 'os-unshelve', 'unshelve') nova-13.1.4/nova/tests/functional/api_sample_tests/test_pause_server.py0000664000567000056710000000402613064447152027633 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class PauseServerSamplesJsonTest(test_servers.ServersSampleBase): extension_name = "os-pause-server" def _get_flags(self): f = super(PauseServerSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.admin_actions.Admin_actions') return f def setUp(self): """setUp Method for PauseServer api samples extension This method creates the server that will be used in each test """ super(PauseServerSamplesJsonTest, self).setUp() self.uuid = self._post_server() def test_post_pause(self): # Get api samples to pause server request. response = self._do_post('servers/%s/action' % self.uuid, 'pause-server', {}) self.assertEqual(202, response.status_code) def test_post_unpause(self): # Get api samples to unpause server request. self.test_post_pause() response = self._do_post('servers/%s/action' % self.uuid, 'unpause-server', {}) self.assertEqual(202, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_aggregates.py0000664000567000056710000000666613064447152027255 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
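# NOTE (editorial illustration, not Nova code): ShelveJsonTest above uses CONF.set_override('shelved_offload_time', -1) so instances stay shelved instead of being offloaded immediately. A minimal sketch of that oslo.config override pattern on a private ConfigOpts instance; the option is re-declared locally just for this example.
from oslo_config import cfg

opts = [
    cfg.IntOpt('shelved_offload_time', default=0,
               help='Seconds before a shelved instance is offloaded; '
                    '-1 disables offloading.'),
]

conf = cfg.ConfigOpts()
conf.register_opts(opts)
conf(args=[])  # no config files or CLI args needed for this sketch

assert conf.shelved_offload_time == 0
conf.set_override('shelved_offload_time', -1)
assert conf.shelved_offload_time == -1
conf.clear_override('shelved_offload_time')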
from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AggregatesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-aggregates" def _get_flags(self): f = super(AggregatesSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.aggregates.Aggregates') return f def test_aggregate_create(self): subs = { "aggregate_id": '(?P\d+)' } response = self._do_post('os-aggregates', 'aggregate-post-req', subs) return self._verify_response('aggregate-post-resp', subs, response, 200) def test_list_aggregates(self): self.test_aggregate_create() response = self._do_get('os-aggregates') self._verify_response('aggregates-list-get-resp', {}, response, 200) def test_aggregate_get(self): agg_id = self.test_aggregate_create() response = self._do_get('os-aggregates/%s' % agg_id) self._verify_response('aggregates-get-resp', {}, response, 200) def test_add_metadata(self): agg_id = self.test_aggregate_create() response = self._do_post('os-aggregates/%s/action' % agg_id, 'aggregate-metadata-post-req', {'action': 'set_metadata'}) self._verify_response('aggregates-metadata-post-resp', {}, response, 200) def test_add_host(self): aggregate_id = self.test_aggregate_create() subs = { "host_name": self.compute.host, } response = self._do_post('os-aggregates/%s/action' % aggregate_id, 'aggregate-add-host-post-req', subs) self._verify_response('aggregates-add-host-post-resp', subs, response, 200) def test_remove_host(self): self.test_add_host() subs = { "host_name": self.compute.host, } response = self._do_post('os-aggregates/1/action', 'aggregate-remove-host-post-req', subs) self._verify_response('aggregates-remove-host-post-resp', subs, response, 200) def test_update_aggregate(self): aggregate_id = self.test_aggregate_create() response = self._do_put('os-aggregates/%s' % aggregate_id, 'aggregate-update-post-req', {}) self._verify_response('aggregate-update-post-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_quota_sets.py0000664000567000056710000000771713064447152027331 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class QuotaSetsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-quota-sets" def _get_flags(self): f = super(QuotaSetsSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.server_group_quotas.' 
'Server_group_quotas') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.quotas.Quotas') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.extended_quotas.Extended_quotas') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.user_quotas.User_quotas') return f def test_show_quotas(self): # Get api sample to show quotas. response = self._do_get('os-quota-sets/fake_tenant') self._verify_response('quotas-show-get-resp', {}, response, 200) def test_show_quotas_defaults(self): # Get api sample to show quotas defaults. response = self._do_get('os-quota-sets/fake_tenant/defaults') self._verify_response('quotas-show-defaults-get-resp', {}, response, 200) def test_update_quotas(self): # Get api sample to update quotas. response = self._do_put('os-quota-sets/fake_tenant', 'quotas-update-post-req', {}) self._verify_response('quotas-update-post-resp', {}, response, 200) def test_delete_quotas(self): # Get api sample to delete quota. response = self._do_delete('os-quota-sets/fake_tenant') self.assertEqual(202, response.status_code) self.assertEqual('', response.content) def test_update_quotas_force(self): # Get api sample to update quotas. response = self._do_put('os-quota-sets/fake_tenant', 'quotas-update-force-post-req', {}) return self._verify_response('quotas-update-force-post-resp', {}, response, 200) def test_show_quotas_for_user(self): # Get api sample to show quotas for user. response = self._do_get('os-quota-sets/fake_tenant?user_id=1') self._verify_response('user-quotas-show-get-resp', {}, response, 200) def test_delete_quotas_for_user(self): response = self._do_delete('os-quota-sets/fake_tenant?user_id=1') self.assertEqual(202, response.status_code) self.assertEqual('', response.content) def test_update_quotas_for_user(self): # Get api sample to update quotas for user. response = self._do_put('os-quota-sets/fake_tenant?user_id=1', 'user-quotas-update-post-req', {}) return self._verify_response('user-quotas-update-post-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_hide_server_addresses.py0000664000567000056710000000361013064447152031462 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
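# Functional API sample tests for the os-hide-server-addresses extension.
# ServersSampleHideAddressesJsonTest reuses the whole ServersSampleJsonTest
# suite and only changes configuration: setUp() overrides
# osapi_hide_server_address_states to [vm_states.ACTIVE] so the generated
# samples show the address fields being hidden for active servers.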
from oslo_config import cfg from nova.compute import vm_states from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_hide_server_address_states', 'nova.api.openstack.compute.hide_server_addresses') CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServersSampleHideAddressesJsonTest(test_servers.ServersSampleJsonTest): extension_name = 'os-hide-server-addresses' # Override the sample dirname because # test_servers.ServersSampleJsonTest does and so it won't default # to the extension name sample_dir = extension_name def _get_flags(self): f = super(ServersSampleHideAddressesJsonTest, self)._get_flags() f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.hide_server_addresses.' 'Hide_server_addresses') return f def setUp(self): # We override osapi_hide_server_address_states in order # to have an example of in the json samples of the # addresses being hidden CONF.set_override("osapi_hide_server_address_states", [vm_states.ACTIVE]) super(ServersSampleHideAddressesJsonTest, self).setUp() nova-13.1.4/nova/tests/functional/api_sample_tests/test_keypairs.py0000664000567000056710000002132113064447152026754 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from nova.objects import keypair as keypair_obj from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit import fake_crypto CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): microversion = None sample_dir = "keypairs" expected_delete_status_code = 202 expected_post_status_code = 200 def _get_flags(self): f = super(KeyPairsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') return f def setUp(self): super(KeyPairsSampleJsonTest, self).setUp() self.api.microversion = self.microversion # TODO(sdague): this is only needed because we randomly choose the # uuid each time. def generalize_subs(self, subs, vanilla_regexes): subs['keypair_name'] = 'keypair-[0-9a-f-]+' return subs def test_keypairs_post(self): return self._check_keypairs_post() def _check_keypairs_post(self, **kwargs): """Get api sample of key pairs post request.""" key_name = 'keypair-' + str(uuid.uuid4()) subs = dict(keypair_name=key_name, **kwargs) response = self._do_post('os-keypairs', 'keypairs-post-req', subs) subs = {'keypair_name': key_name} self._verify_response('keypairs-post-resp', subs, response, self.expected_post_status_code) # NOTE(maurosr): return the key_name is necessary cause the # verification returns the label of the last compared information in # the response, not necessarily the key name. 
return key_name def test_keypairs_import_key_post(self): public_key = fake_crypto.get_ssh_public_key() self._check_keypairs_import_key_post(public_key) def _check_keypairs_import_key_post(self, public_key, **kwargs): # Get api sample of key pairs post to import user's key. key_name = 'keypair-' + str(uuid.uuid4()) subs = { 'keypair_name': key_name, } params = subs.copy() params['public_key'] = public_key params.update(**kwargs) response = self._do_post('os-keypairs', 'keypairs-import-post-req', params) self._verify_response('keypairs-import-post-resp', subs, response, self.expected_post_status_code) def test_keypairs_list(self): # Get api sample of key pairs list request. key_name = self.test_keypairs_post() response = self._do_get('os-keypairs') subs = {'keypair_name': key_name} self._verify_response('keypairs-list-resp', subs, response, 200) def test_keypairs_get(self): # Get api sample of key pairs get request. key_name = self.test_keypairs_post() response = self._do_get('os-keypairs/%s' % key_name) subs = {'keypair_name': key_name} self._verify_response('keypairs-get-resp', subs, response, 200) def test_keypairs_delete(self): # Get api sample of key pairs delete request. key_name = self.test_keypairs_post() response = self._do_delete('os-keypairs/%s' % key_name) self.assertEqual(self.expected_delete_status_code, response.status_code) class KeyPairsV22SampleJsonTest(KeyPairsSampleJsonTest): microversion = '2.2' expected_post_status_code = 201 expected_delete_status_code = 204 # NOTE(gmann): microversion tests do not need to run for v2 API # so defining scenarios only for v2.2 which will run the original tests # by appending '(v2_2)' in test_id. scenarios = [('v2_2', {'api_major_version': 'v2.1'})] def test_keypairs_post(self): # NOTE(claudiub): overrides the method with the same name in # KeypairsSampleJsonTest, as it is used by other tests. return self._check_keypairs_post( keypair_type=keypair_obj.KEYPAIR_TYPE_SSH) def test_keypairs_post_x509(self): return self._check_keypairs_post( keypair_type=keypair_obj.KEYPAIR_TYPE_X509) def test_keypairs_post_invalid(self): key_name = 'keypair-' + str(uuid.uuid4()) subs = dict(keypair_name=key_name, keypair_type='fakey_type') response = self._do_post('os-keypairs', 'keypairs-post-req', subs) self.assertEqual(400, response.status_code) def test_keypairs_import_key_post(self): # NOTE(claudiub): overrides the method with the same name in # KeypairsSampleJsonTest, since the API sample expects a keypair_type. 
public_key = fake_crypto.get_ssh_public_key() self._check_keypairs_import_key_post( public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH) def test_keypairs_import_key_post_x509(self): public_key = fake_crypto.get_x509_cert_and_fingerprint()[0] public_key = public_key.replace('\n', '\\n') self._check_keypairs_import_key_post( public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_X509) def _check_keypairs_import_key_post_invalid(self, keypair_type): key_name = 'keypair-' + str(uuid.uuid4()) subs = { 'keypair_name': key_name, 'keypair_type': keypair_type, 'public_key': fake_crypto.get_ssh_public_key() } response = self._do_post('os-keypairs', 'keypairs-import-post-req', subs) self.assertEqual(400, response.status_code) def test_keypairs_import_key_post_invalid_type(self): self._check_keypairs_import_key_post_invalid( keypair_type='fakey_type') def test_keypairs_import_key_post_invalid_combination(self): self._check_keypairs_import_key_post_invalid( keypair_type=keypair_obj.KEYPAIR_TYPE_X509) class KeyPairsV210SampleJsonTest(KeyPairsSampleJsonTest): ADMIN_API = True microversion = '2.10' expected_post_status_code = 201 expected_delete_status_code = 204 scenarios = [('v2_10', {'api_major_version': 'v2.1'})] def test_keypair_create_for_user(self): subs = { 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH, 'public_key': fake_crypto.get_ssh_public_key(), 'user_id': "fake" } self._check_keypairs_post(**subs) def test_keypairs_post(self): return self._check_keypairs_post( keypair_type=keypair_obj.KEYPAIR_TYPE_SSH, user_id="admin") def test_keypairs_import_key_post(self): # NOTE(claudiub): overrides the method with the same name in # KeypairsSampleJsonTest, since the API sample expects a keypair_type. public_key = fake_crypto.get_ssh_public_key() self._check_keypairs_import_key_post( public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH, user_id="fake") def test_keypairs_delete_for_user(self): # Delete a keypair on behalf of a user subs = { 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH, 'public_key': fake_crypto.get_ssh_public_key(), 'user_id': "fake" } key_name = self._check_keypairs_post(**subs) response = self._do_delete('os-keypairs/%s?user_id=fake' % key_name) self.assertEqual(self.expected_delete_status_code, response.status_code) class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest): ADMIN_API = False def test_keypairs_post(self): return self._check_keypairs_post( keypair_type=keypair_obj.KEYPAIR_TYPE_SSH, user_id="fake") def test_keypairs_post_for_other_user(self): key_name = 'keypair-' + str(uuid.uuid4()) subs = dict(keypair_name=key_name, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH, user_id='fake1') response = self._do_post('os-keypairs', 'keypairs-post-req', subs) self.assertEqual(403, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_extended_server_attributes.py0000664000567000056710000000712213064447152032564 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
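# Functional API sample tests for the os-extended-server-attributes
# extension. test_show and test_detail boot a server, fetch it via
# 'servers/%s' and 'servers/detail', and verify the responses against the
# stored samples; variable fields (hostid, instance_name,
# hypervisor_hostname, access IPs) are matched with the regexes placed in
# ``subs``. The V216 subclass repeats the same checks under microversion
# 2.16.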
from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ExtendedServerAttributesJsonTest(test_servers.ServersSampleBase): extension_name = "os-extended-server-attributes" def _get_flags(self): f = super(ExtendedServerAttributesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_server_attributes.' 'Extended_server_attributes') return f def test_show(self): uuid = self._post_server() response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['instance_name'] = 'instance-\d{8}' subs['hypervisor_hostname'] = r'[\w\.\-]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_detail(self): uuid = self._post_server() response = self._do_get('servers/detail') subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['instance_name'] = 'instance-\d{8}' subs['hypervisor_hostname'] = r'[\w\.\-]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200) class ExtendedServerAttributesJsonTestV216(ExtendedServerAttributesJsonTest): microversion = '2.16' scenarios = [('v2_16', {'api_major_version': 'v2.1'})] def test_show(self): uuid = self._post_server() response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['instance_name'] = 'instance-\d{8}' subs['hypervisor_hostname'] = r'[\w\.\-]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_detail(self): uuid = self._post_server() response = self._do_get('servers/detail') subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['instance_name'] = 'instance-\d{8}' subs['hypervisor_hostname'] = r'[\w\.\-]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_cells.py0000664000567000056710000001104513064447152026231 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
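# Functional API sample tests for the os-cells extension. setUp() enables
# cells (db_check_interval=-1 makes the cells manager hit the DB on every
# request), starts the cells service, and stubs nova.db.cell_get_all and
# CellsAPI.cell_get so the tests run against five in-memory fake cells.
# The capacity tests additionally mock get_our_capacities() with a canned
# ram_free/disk_free response.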
from six.moves import range from nova.cells import state import nova.conf from nova.db.sqlalchemy import models from nova import exception from nova.tests.functional.api_sample_tests import api_sample_base CONF = nova.conf.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class CellsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-cells" def _get_flags(self): f = super(CellsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.cells.Cells') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.cell_capacities.Cell_capacities') return f def setUp(self): # db_check_interval < 0 makes cells manager always hit the DB self.flags(enable=True, db_check_interval=-1, group='cells') super(CellsSampleJsonTest, self).setUp() self.cells = self.start_service('cells', manager=CONF.cells.manager) self._stub_cells() def _stub_cells(self, num_cells=5): self.cell_list = [] self.cells_next_id = 1 def _fake_cell_get_all(context): return self.cell_list def _fake_cell_get(inst, context, cell_name): for cell in self.cell_list: if cell['name'] == cell_name: return cell raise exception.CellNotFound(cell_name=cell_name) for x in range(num_cells): cell = models.Cell() our_id = self.cells_next_id self.cells_next_id += 1 cell.update({'id': our_id, 'name': 'cell%s' % our_id, 'transport_url': 'rabbit://username%s@/' % our_id, 'is_parent': our_id % 2 == 0}) self.cell_list.append(cell) self.stub_out('nova.db.cell_get_all', _fake_cell_get_all) self.stub_out('nova.cells.rpcapi.CellsAPI.cell_get', _fake_cell_get) def test_cells_empty_list(self): # Override this self._stub_cells(num_cells=0) response = self._do_get('os-cells') self._verify_response('cells-list-empty-resp', {}, response, 200) def test_cells_list(self): response = self._do_get('os-cells') self._verify_response('cells-list-resp', {}, response, 200) def test_cells_get(self): response = self._do_get('os-cells/cell3') self._verify_response('cells-get-resp', {}, response, 200) def test_get_cell_capacity(self): self._mock_cell_capacity() state_manager = state.CellStateManager() my_state = state_manager.get_my_state() response = self._do_get('os-cells/%s/capacities' % my_state.name) return self._verify_response('cells-capacities-resp', {}, response, 200) def test_get_all_cells_capacity(self): self._mock_cell_capacity() response = self._do_get('os-cells/capacities') return self._verify_response('cells-capacities-resp', {}, response, 200) def _mock_cell_capacity(self): self.mox.StubOutWithMock(self.cells.manager.state_manager, 'get_our_capacities') response = {"ram_free": {"units_by_mb": {"8192": 0, "512": 13, "4096": 1, "2048": 3, "16384": 0}, "total_mb": 7680}, "disk_free": {"units_by_mb": {"81920": 11, "20480": 46, "40960": 23, "163840": 5, "0": 0}, "total_mb": 1052672} } self.cells.manager.state_manager.get_our_capacities(). \ AndReturn(response) self.mox.ReplayAll() nova-13.1.4/nova/tests/functional/api_sample_tests/test_hosts.py0000664000567000056710000000444713064447152026277 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class HostsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-hosts" def _get_flags(self): f = super(HostsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.hosts.Hosts') return f def test_host_startup(self): response = self._do_get('os-hosts/%s/startup' % self.compute.host) self._verify_response('host-get-startup', {}, response, 200) def test_host_reboot(self): response = self._do_get('os-hosts/%s/reboot' % self.compute.host) self._verify_response('host-get-reboot', {}, response, 200) def test_host_shutdown(self): response = self._do_get('os-hosts/%s/shutdown' % self.compute.host) self._verify_response('host-get-shutdown', {}, response, 200) def test_host_maintenance(self): response = self._do_put('os-hosts/%s' % self.compute.host, 'host-put-maintenance-req', {}) self._verify_response('host-put-maintenance-resp', {}, response, 200) def test_host_get(self): response = self._do_get('os-hosts/%s' % self.compute.host) self._verify_response('host-get-resp', {}, response, 200) def test_hosts_list(self): response = self._do_get('os-hosts') self._verify_response('hosts-list-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_pci.py0000664000567000056710000001563313064447152025711 0ustar jenkinsjenkins00000000000000# Copyright 2013 Intel. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
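# Functional API sample tests for the os-pci extension. Every test class
# below currently skips itself in setUp() (skip_msg references bug 1426241);
# the fake_db_dev_* rows and the fake ComputeNode/Service objects are the
# fixtures the tests stub into nova.db and nova.objects when they do run.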
import collections import mock from oslo_serialization import jsonutils import testtools from nova import objects from nova.objects import fields from nova.objects import pci_device_pool from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.functional.api_sample_tests import test_servers skip_msg = "Bug 1426241" fake_db_dev_1 = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'id': 1, 'compute_node_id': 1, 'address': '0000:04:10.0', 'vendor_id': '8086', 'numa_node': 0, 'product_id': '1520', 'dev_type': fields.PciDeviceType.SRIOV_VF, 'status': 'available', 'dev_id': 'pci_0000_04_10_0', 'label': 'label_8086_1520', 'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1', 'request_id': None, 'extra_info': '{"key1": "value1", "key2": "value2"}' } fake_db_dev_2 = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'id': 2, 'compute_node_id': 1, 'address': '0000:04:10.1', 'vendor_id': '8086', 'numa_node': 1, 'product_id': '1520', 'dev_type': fields.PciDeviceType.SRIOV_VF, 'status': 'available', 'dev_id': 'pci_0000_04_10_1', 'label': 'label_8086_1520', 'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc', 'request_id': None, 'extra_info': '{"key3": "value3", "key4": "value4"}' } class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase): extension_name = "os-pci" def setUp(self): raise testtools.TestCase.skipException(skip_msg) def test_show(self): uuid = self._post_server() response = self._do_get('servers/%s' % uuid) subs = {'hostid': '[a-f0-9]+'} self._verify_response('server-get-resp', subs, response, 200) def test_detail(self): self._post_server() response = self._do_get('servers/detail') subs = {'hostid': '[a-f0-9]+'} self._verify_response('servers-detail-resp', subs, response, 200) class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extra_extensions_to_load = ['os-hypervisors'] extension_name = 'os-pci' def setUp(self): raise testtools.TestCase.skipException(skip_msg) super(ExtendedHyervisorPciSampleJsonTest, self).setUp() cpu_info = collections.OrderedDict([ ('arch', 'x86_64'), ('model', 'Nehalem'), ('vendor', 'Intel'), ('features', ['pge', 'clflush']), ('topology', { 'cores': 1, 'threads': 1, 'sockets': 4, }), ]) self.fake_compute_node = objects.ComputeNode( cpu_info=jsonutils.dumps(cpu_info), current_workload=0, disk_available_least=0, host_ip="1.1.1.1", state="up", status="enabled", free_disk_gb=1028, free_ram_mb=7680, hypervisor_hostname="fake-mini", hypervisor_type="fake", hypervisor_version=1000, id=1, local_gb=1028, local_gb_used=0, memory_mb=8192, memory_mb_used=512, running_vms=0, vcpus=1, vcpus_used=0, service_id=2, host='043b3cacf6f34c90a7245151fc8ebcda', pci_device_pools=pci_device_pool.from_pci_stats( {"count": 5, "vendor_id": "8086", "product_id": "1520", "keya": "valuea", "key1": "value1", "numa_node": 1}),) self.fake_service = objects.Service( id=2, host='043b3cacf6f34c90a7245151fc8ebcda', disabled=False, disabled_reason=None) @mock.patch("nova.servicegroup.API.service_is_up", return_value=True) @mock.patch("nova.objects.Service.get_by_compute_host") @mock.patch("nova.objects.ComputeNode.get_by_id") def test_pci_show(self, mock_obj, mock_svc_get, mock_service): mock_obj.return_value = self.fake_compute_node mock_svc_get.return_value = self.fake_service hypervisor_id = 1 response = self._do_get('os-hypervisors/%s' % hypervisor_id) subs = { 'hypervisor_id': hypervisor_id, } self._verify_response('hypervisors-pci-show-resp', subs, 
response, 200) @mock.patch("nova.servicegroup.API.service_is_up", return_value=True) @mock.patch("nova.objects.Service.get_by_compute_host") @mock.patch("nova.objects.ComputeNodeList.get_all") def test_pci_detail(self, mock_obj, mock_svc_get, mock_service): mock_obj.return_value = [self.fake_compute_node] mock_svc_get.return_value = self.fake_service hypervisor_id = 1 subs = { 'hypervisor_id': hypervisor_id } response = self._do_get('os-hypervisors/detail') self._verify_response('hypervisors-pci-detail-resp', subs, response, 200) class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-pci" def setUp(self): raise testtools.TestCase.skipException(skip_msg) def _fake_pci_device_get_by_id(self, context, id): return fake_db_dev_1 def _fake_pci_device_get_all_by_node(self, context, id): return [fake_db_dev_1, fake_db_dev_2] def test_pci_show(self): self.stub_out('nova.db.pci_device_get_by_id', self._fake_pci_device_get_by_id) response = self._do_get('os-pci/1') self._verify_response('pci-show-resp', {}, response, 200) def test_pci_index(self): self.stub_out('nova.db.pci_device_get_all_by_node', self._fake_pci_device_get_all_by_node) response = self._do_get('os-pci') self._verify_response('pci-index-resp', {}, response, 200) def test_pci_detail(self): self.stub_out('nova.db.pci_device_get_all_by_node', self._fake_pci_device_get_all_by_node) response = self._do_get('os-pci/detail') self._verify_response('pci-detail-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/0000775000567000056710000000000013064447471026016 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-output/0000775000567000056710000000000013064447471031435 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post0000664000567000056710000000007313064447140035334 0ustar jenkinsjenkins00000000000000{ "output": "FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE" }././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-output/console-output-post0000664000567000056710000000007413064447140035335 0ustar jenkinsjenkins00000000000000{ "os-getConsoleOutput": { "length": 50 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/0000775000567000056710000000000013064447471031045 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-create-image.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-create-i0000664000567000056710000000020113064447152035225 0ustar jenkinsjenkins00000000000000{ "createImage" : { "name" : "%(name)s", "metadata": { "meta_var": "meta_val" } } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-detail-resp.js0000664000567000056710000000637713064447152035277 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "2", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/2", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "os-flavor-access:is_public": true, "ram": 2048, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "3", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/3", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "os-flavor-access:is_public": true, "ram": 4096, "rxtx_factor": 1.0, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "4", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/4", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "os-flavor-access:is_public": true, "ram": 8192, "rxtx_factor": 1.0, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "5", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/5", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "os-flavor-access:is_public": true, "ram": 16384, "rxtx_factor": 1.0, "swap": "", "vcpus": 8 } ] } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-get-resp.json.t0000664000567000056710000000421113064447152035226 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", 
"os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "%(strtime)s", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavors-list-resp.json0000664000567000056710000000370213064447152035332 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny" }, { "id": "2", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/2", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/2", "rel": "bookmark" } ], "name": "m1.small" }, { "id": "3", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/3", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/3", "rel": "bookmark" } ], "name": "m1.medium" }, { "id": "4", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/4", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/4", "rel": "bookmark" } ], "name": "m1.large" }, { "id": "5", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/5", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge" } ] }././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-req.json0000664000567000056710000000031113064447152035263 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-reboot.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-reboot.j0000664000567000056710000000006713064447152035270 0ustar jenkinsjenkins00000000000000{ "reboot" : { "type" : "%(type)s" } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v2.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v0000664000567000056710000006346713064447152035366 0ustar jenkinsjenkins00000000000000{ "extensions": [ { "alias": "NMN", "description": "%(text)s", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/multinic/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-DCF", "description": "%(text)s", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/disk_config/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-EXT-AZ", "description": "%(text)s", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2", "updated": "%(isotime)s" }, { "alias": "OS-EXT-IPS", "description": 
"%(text)s", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/extended_ips/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-EXT-IPS-MAC", "description": "%(text)s", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-EXT-IMG-SIZE", "description": "%(text)s", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-EXT-SRV-ATTR", "description": "%(text)s", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-EXT-STS", "description": "%(text)s", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-extended-volumes", "description": "%(text)s", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-EXT-VIF-NET", "description": "%(text)s", "links": [], "name": "ExtendedVIFNet", "namespace": "http://docs.openstack.org/compute/ext/extended-virtual-interfaces-net/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-FLV-DISABLED", "description": "%(text)s", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-FLV-EXT-DATA", "description": "%(text)s", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1", "updated": "%(isotime)s" }, { "alias": "OS-SRV-USG", "description": "%(text)s", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/server_usage/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-console-auth-tokens", "description": "%(text)s", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/consoles-auth-tokens/api/v2", "updated": "%(isotime)s" }, { "alias": "OS-SCH-HNT", "description": "%(text)s", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2", "updated": "%(isotime)s" }, { "alias": "os-admin-actions", "description": "%(text)s", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-aggregates", "description": "%(text)s", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/aggregates/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-agents", "description": "%(text)s", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2", "updated": "%(isotime)s" }, { "alias": "os-assisted-volume-snapshots", "description": "%(text)s", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/assisted-volume-snapshots/api/v2", "updated": "%(isotime)s" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/interfaces/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-availability-zone", "description": "%(text)s", "links": [], "name": "AvailabilityZone", "namespace": 
"http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-baremetal-nodes", "description": "%(text)s", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2", "updated": "%(isotime)s" }, { "alias": "os-block-device-mapping-v2-boot", "description": "%(text)s", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/block_device_mapping_v2_boot/api/v2", "updated": "%(isotime)s" }, { "alias": "os-cells", "description": "%(text)s", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-cell-capacities", "description": "%(text)s", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/cell_capacities/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-certificates", "description": "%(text)s", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/certificates/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-cloudpipe", "description": "%(text)s", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-cloudpipe-update", "description": "%(text)s", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2", "updated": "%(isotime)s" }, { "alias": "os-config-drive", "description": "%(text)s", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/config_drive/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-console-output", "description": "%(text)s", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/os-console-output/api/v2", "updated": "%(isotime)s" }, { "alias": "os-consoles", "description": "%(text)s", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/os-consoles/api/v2", "updated": "%(isotime)s" }, { "alias": "os-create-server-ext", "description": "%(text)s", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/createserverext/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-deferred-delete", "description": "%(text)s", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-evacuate", "description": "%(text)s", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2", "updated": "%(isotime)s" }, { "alias": "os-extended-floating-ips", "description": "%(text)s", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/extended_floating_ips/api/v2", "updated": "%(isotime)s" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fixed_ips/api/v2", "updated": "%(isotime)s" }, { "alias": "os-flavor-access", "description": "%(text)s", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/flavor_access/api/v2", "updated": "%(isotime)s" }, { "alias": "os-flavor-extra-specs", "description": "%(text)s", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1", "updated": "%(isotime)s" }, { "alias": 
"os-flavor-manage", "description": "%(text)s", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/flavor_manage/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-flavor-rxtx", "description": "%(text)s", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/flavor_rxtx/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-flavor-swap", "description": "%(text)s", "links": [], "name": "FlavorSwap", "namespace": "http://docs.openstack.org/compute/ext/flavor_swap/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-floating-ip-dns", "description": "%(text)s", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-floating-ip-pools", "description": "%(text)s", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/floating_ip_pools/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-floating-ips", "description": "%(text)s", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-floating-ips-bulk", "description": "%(text)s", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2", "updated": "%(isotime)s" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-hosts", "description": "%(text)s", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/hosts/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-services", "description": "%(text)s", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/services/api/v2", "updated": "%(isotime)s" }, { "alias": "os-extended-services", "description": "%(text)s", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/extended_services/api/v2", "updated": "%(isotime)s" }, { "alias": "os-fping", "description": "%(text)s", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fping/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-hypervisors", "description": "%(text)s", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-extended-hypervisors", "description": "%(text)s", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-hypervisor-status", "description": "%(text)s", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-server-external-events", "description": "%(text)s", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/server-external-events/api/v2", "updated": "%(isotime)s" }, { "alias": "os-instance_usage_audit_log", "description": "%(text)s", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/ext/services/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-keypairs", "description": "%(text)s", "links": [], "name": "Keypairs", 
"namespace": "http://docs.openstack.org/compute/ext/keypairs/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-multiple-create", "description": "%(text)s", "links": [], "name": "MultipleCreate", "namespace": "http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-networks", "description": "%(text)s", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-tenant-networks", "description": "%(text)s", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2", "updated": "%(isotime)s" }, { "alias": "os-networks-associate", "description": "%(text)s", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2", "updated": "%(isotime)s" }, { "alias": "os-quota-class-sets", "description": "%(text)s", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-extended-networks", "description": "%(text)s", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2", "updated": "%(isotime)s" }, { "alias": "os-extended-quotas", "description": "%(text)s", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/extended_quotas/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-quota-sets", "description": "%(text)s", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-user-quotas", "description": "%(text)s", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/user_quotas/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-rescue", "description": "%(text)s", "links": [], "name": "Rescue", "namespace": "http://docs.openstack.org/compute/ext/rescue/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-extended-rescue-with-image", "description": "%(text)s", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/extended_rescue_with_image/api/v2", "updated": "%(isotime)s" }, { "alias": "os-security-group-default-rules", "description": "%(text)s", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-security-groups", "description": "%(text)s", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-server-diagnostics", "description": "%(text)s", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-server-list-multi-status", "description": "%(text)s", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2", "updated": "%(isotime)s" }, { "alias": "os-server-password", "description": "%(text)s", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2", "updated": "%(isotime)s" }, { "alias": "os-server-start-stop", "description": "%(text)s", "links": [], "name": "ServerStartStop", 
"namespace": "http://docs.openstack.org/compute/ext/servers/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-shelve", "description": "%(text)s", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/shelve/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-simple-tenant-usage", "description": "%(text)s", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-used-limits", "description": "%(text)s", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/used_limits/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-used-limits-for-admin", "description": "%(text)s", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/used_limits_for_admin/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-user-data", "description": "%(text)s", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/userdata/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-virtual-interfaces", "description": "%(text)s", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-volume-attachment-update", "description": "%(text)s", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/os-volume-attachment-update/api/v2", "updated": "%(isotime)s" }, { "alias": "os-volumes", "description": "%(text)s", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/volumes/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-instance-actions", "description": "%(text)s", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1", "updated": "%(isotime)s" }, { "alias": "os-migrations", "description": "%(text)s", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/migrations/api/v2.0", "updated": "%(isotime)s" }, { "alias": "os-baremetal-ext-status", "description": "%(text)s", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/baremetal_ext_status/api/v2", "updated": "%(isotime)s" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "%(text)s", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/preserve_ephemeral_rebuild/api/v2", "updated": "%(isotime)s" }, { "alias": "os-extended-services-delete", "description": "%(text)s", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/extended_services_delete/api/v2", "updated": "%(isotime)s" }, { "alias": "os-server-groups", "description": "%(text)s", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/servergroups/api/v2", "updated": "%(isotime)s" }, { "alias": "os-extended-evacuate-find-host", "description": "%(text)s", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2", "updated": "%(isotime)s" }, { "alias": "os-server-group-quotas", "description": "%(text)s", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/server-group-quotas/api/v2", "updated": "%(isotime)s" }, { "alias": "os-server-sort-keys", "description": "%(text)s", "links": [], "name": 
"ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/server_sort_keys/api/v2", "updated": "%(isotime)s" } ] } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild-0000664000567000056710000000270513064447152035252 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] }, "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "meta var": "meta val" }, "name": "%(name)s", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-details-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-details-resp.j0000664000567000056710000000466613064447152035313 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "%(strtime)s", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } ] } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-confirm-resize.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-confirm-0000664000567000056710000000003713064447152035255 0ustar jenkinsjenkins00000000000000{ "confirmResize" : null } 
././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-rebuild.0000664000567000056710000000151413064447152035250 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "imageRef" : "%(uuid)s", "name" : "%(name)s", "adminPass" : "%(pass)s", "accessIPv4" : "%(access_ip_v4)s", "accessIPv6" : "%(access_ip_v6)s", "metadata" : { "meta var" : "meta val" }, "personality" : [ { "path" : "/etc/banner.txt", "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp.j0000664000567000056710000007332313064447152035343 0ustar jenkinsjenkins00000000000000{ "extensions": [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the 
scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-access-ips", "description": "Access IPs support.", "links": [], "name": "AccessIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n Actions include: resetNetwork, injectNetworkInfo, os-resetState\n ", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-password", "description": "Admin password management support.", "links": [], "name": "AdminPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server API.\n 2. 
Add availability zones describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping", "description": "Block device mapping boot support.", "links": [], "name": "BlockDeviceMapping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. 
Support for\n a SSH Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-backup", "description": "Create a backup of a server.", "links": [], "name": "CreateBackup", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-networks", "description": "", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services", "description": "", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services-delete", 
"description": "", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-status", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Flavors extra specs support.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-swap", "description": "", "links": [], "name": "FlavorSwap", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": 
"2014-12-03T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only hypervisor administration.", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-lock-server", "description": "Enable lock/unlock server actions.", "links": [], "name": "LockServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrate-server", "description": "Enable migrate and live-migrate server actions.", "links": [], "name": "MigrateServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v2.1 API.", "links": [], "name": "MultipleCreate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-pause-server", "description": "Enable pause/unpause server actions.", "links": [], "name": "PauseServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-personality", "description": "Personality support.", "links": [], "name": "Personality", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on rebuild.", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": "Allow Admins to view server diagnostics through server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-suspend-server", "description": "Enable suspend/resume server actions.", "links": [], "name": "SuspendServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": 
"2014-12-03T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-quotas", "description": "", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } ] } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/servers-list-resp.json0000664000567000056710000000067613064447152035356 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "%(uuid)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "name": "new-server-test" } ] } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v21-compatible.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v0000664000567000056710000007374613064447152035367 0ustar jenkinsjenkins00000000000000{ "extensions": [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes 
support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-VIF-NET", "description": "", "links": [], "name": "ExtendedVIFNet", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-access-ips", "description": "Access IPs support.", "links": [], "name": "AccessIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n Actions include: resetNetwork, injectNetworkInfo, os-resetState\n ", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-password", "description": "Admin password management support.", "links": [], "name": "AdminPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server API.\n 2. 
Add availability zones describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping", "description": "Block device mapping boot support.", "links": [], "name": "BlockDeviceMapping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. 
Support for\n a SSH Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-backup", "description": "Create a backup of a server.", "links": [], "name": "CreateBackup", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-networks", "description": "", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services", "description": "", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services-delete", 
"description": "", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-status", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Flavors extra specs support.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-swap", "description": "", "links": [], "name": "FlavorSwap", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": 
"2014-12-03T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only hypervisor administration.", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-lock-server", "description": "Enable lock/unlock server actions.", "links": [], "name": "LockServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrate-server", "description": "Enable migrate and live-migrate server actions.", "links": [], "name": "MigrateServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v2.1 API.", "links": [], "name": "MultipleCreate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-pause-server", "description": "Enable pause/unpause server actions.", "links": [], "name": "PauseServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-personality", "description": "Personality support.", "links": [], "name": "Personality", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on rebuild.", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": "Allow Admins to view server diagnostics through server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-suspend-server", "description": "Enable suspend/resume server actions.", "links": [], "name": "SuspendServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": 
"2014-12-03T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-quotas", "description": "", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } ] } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-revert-resize.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-revert-r0000664000567000056710000000003613064447152035310 0ustar jenkinsjenkins00000000000000{ "revertResize" : null } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-resize.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-action-resize.j0000664000567000056710000000007213064447152035273 0ustar jenkinsjenkins00000000000000{ "resize" : { "flavorRef" : "%(id)s" } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-resp.jso0000664000567000056710000000100413064447152035267 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavor-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/all_extensions/flavor-get-resp.json.t0000664000567000056710000000110613064447152035211 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "OS-FLV-EXT-DATA:ephemeral": 0, "disk": 1, "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 } } 
nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-diagnostics/0000775000567000056710000000000013064447471032250 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-diagnostics/server-diagnost0000664000567000056710000000060313064447140035277 0ustar jenkinsjenkins00000000000000{ "cpu0_time": 17300000000, "memory": 524288, "vda_errors": -1, "vda_read": 262144, "vda_read_req": 112, "vda_write": 5778432, "vda_write_req": 488, "vnet1_rx": 2070139, "vnet1_rx_drop": 0, "vnet1_rx_errors": 0, "vnet1_rx_packets": 26701, "vnet1_tx": 140208, "vnet1_tx_drop": 0, "vnet1_tx_errors": 0, "vnet1_tx_packets": 662 } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/0000775000567000056710000000000013064447471031677 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-action-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-action-g0000664000567000056710000000154613064447140035124 0ustar jenkinsjenkins00000000000000{ "instanceAction": { "action": "%(action)s", "instance_uuid": "%(instance_uuid)s", "request_id": "%(request_id)s", "user_id": "%(integer_id)s", "project_id": "%(integer_id)s", "start_time": "%(strtime)s", "message": "", "events": [ { "event": "%(event)s", "start_time": "%(strtime)s", "finish_time": "%(strtime)s", "result": "%(result)s", "traceback": "" }, { "event": "%(event)s", "start_time": "%(strtime)s", "finish_time": "%(strtime)s", "result": "%(result)s", "traceback": "" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/0000775000567000056710000000000013064447471032447 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-ac0000664000567000056710000000154613064447140034556 0ustar jenkinsjenkins00000000000000{ "instanceAction": { "action": "%(action)s", "instance_uuid": "%(instance_uuid)s", "request_id": "%(request_id)s", "user_id": "%(integer_id)s", "project_id": "%(integer_id)s", "start_time": "%(strtime)s", "message": "", "events": [ { "event": "%(event)s", "start_time": "%(strtime)s", "finish_time": "%(strtime)s", "result": "%(result)s", "traceback": "" }, { "event": "%(event)s", "start_time": "%(strtime)s", "finish_time": "%(strtime)s", "result": "%(result)s", "traceback": "" } ] } } ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-ac0000664000567000056710000000115613064447140034553 0ustar jenkinsjenkins00000000000000{ "instanceActions": [ { "action": "%(action)s", "instance_uuid": "%(uuid)s", "request_id": "%(request_id)s", "user_id": "%(integer_id)s", "project_id": "%(integer_id)s", 
"start_time": "%(strtime)s", "message": "" }, { "action": "%(action)s", "instance_uuid": "%(uuid)s", "request_id": "%(request_id)s", "user_id": "%(integer_id)s", "project_id": "%(integer_id)s", "start_time": "%(strtime)s", "message": "" } ] } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-actions-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-actions-0000664000567000056710000000115613064447140035135 0ustar jenkinsjenkins00000000000000{ "instanceActions": [ { "action": "%(action)s", "instance_uuid": "%(uuid)s", "request_id": "%(request_id)s", "user_id": "%(integer_id)s", "project_id": "%(integer_id)s", "start_time": "%(strtime)s", "message": "" }, { "action": "%(action)s", "instance_uuid": "%(uuid)s", "request_id": "%(request_id)s", "user_id": "%(integer_id)s", "project_id": "%(integer_id)s", "start_time": "%(strtime)s", "message": "" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pause-server/0000775000567000056710000000000013064447471031056 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pause-server/unpause-server.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pause-server/unpause-server.json.t0000664000567000056710000000003013064447140035161 0ustar jenkinsjenkins00000000000000{ "unpause": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pause-server/pause-server.json.tpl0000664000567000056710000000002613064447140035157 0ustar jenkinsjenkins00000000000000{ "pause": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/0000775000567000056710000000000013064447471034210 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000021700000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/secur0000664000567000056710000000032113064447140035241 0ustar jenkinsjenkins00000000000000{ "security_group_default_rule": { "id": 1, "from_port": 80, "to_port": 80, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" } } }././@LongLink0000000000000000000000000000022100000000000011210 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/secur0000664000567000056710000000032013064447140035240 0ustar jenkinsjenkins00000000000000{ "security_group_default_rule": { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range":{ "cidr": "10.10.10.0/24" }, "to_port": 80 } }././@LongLink0000000000000000000000000000022000000000000011207 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/secur0000664000567000056710000000024013064447140035241 0ustar jenkinsjenkins00000000000000{ "security_group_default_rule": { "ip_protocol": 
"TCP", "from_port": "80", "to_port": "80", "cidr": "10.10.10.0/24" } }././@LongLink0000000000000000000000000000021700000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-group-default-rules/secur0000664000567000056710000000040213064447140035241 0ustar jenkinsjenkins00000000000000{ "security_group_default_rules": [ { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" }, "to_port": 80 } ] }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/0000775000567000056710000000000013064447471030546 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenan0000664000567000056710000000021013064447140035034 0ustar jenkinsjenkins00000000000000{ "flavor_access": [ { "flavor_id": "%(flavor_id)s", "tenant_id": "%(tenant_id)s" } ] } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-re0000664000567000056710000000030513064447152035060 0ustar jenkinsjenkins00000000000000{ "flavor": { "name": "%(flavor_name)s", "ram": 1024, "vcpus": 2, "disk": 10, "id": "%(flavor_id)s", "os-flavor-access:is_public": false } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-te0000664000567000056710000000003313064447140035107 0ustar jenkinsjenkins00000000000000{ "flavor_access": [] }././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-list-resp0000664000567000056710000000020613064447140035130 0ustar jenkinsjenkins00000000000000{ "flavor_access": [ { "flavor_id": "%(flavor_id)s", "tenant_id": "fake_tenant" } ] } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-create-re0000664000567000056710000000113113064447152035056 0ustar jenkinsjenkins00000000000000{ "flavor": { "disk": 10, "id": "%(flavor_id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/%(flavor_id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/%(flavor_id)s", "rel": "bookmark" } ], "name": "%(flavor_name)s", "os-flavor-access:is_public": false, "ram": 1024, "vcpus": 2, "OS-FLV-DISABLED:disabled": false, "OS-FLV-EXT-DATA:ephemeral": 0, "swap": "" } } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-remove-te0000664000567000056710000000011013064447140035103 0ustar jenkinsjenkins00000000000000{ "removeTenantAccess": { "tenant": "%(tenant_id)s" } } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-detail-re0000664000567000056710000000613713064447152035070 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "2", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/2", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "os-flavor-access:is_public": true, "ram": 2048, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "3", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/3", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "os-flavor-access:is_public": true, "ram": 4096, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "4", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/4", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "os-flavor-access:is_public": true, "ram": 8192, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "5", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/5", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "os-flavor-access:is_public": true, "ram": 16384, "swap": "", "vcpus": 8 } ] } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-show-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-show-resp0000664000567000056710000000111613064447152035141 0ustar jenkinsjenkins00000000000000{ "flavor": { "disk": 1, "id": "%(flavor_id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/%(flavor_id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/%(flavor_id)s", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "vcpus": 1, "OS-FLV-DISABLED:disabled": false, "OS-FLV-EXT-DATA:ephemeral": 0, "swap": "" } } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenant-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-access/flavor-access-add-tenan0000664000567000056710000000010513064447140035037 0ustar jenkinsjenkins00000000000000{ "addTenantAccess": { "tenant": "%(tenant_id)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/0000775000567000056710000000000013064447471033226 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usag0000664000567000056710000000067313064447152035065 0ustar jenkinsjenkins00000000000000{ "instance_usage_audit_log": { "hosts_not_run": [ "%(host_name)s" ], "log": {}, "num_hosts": 1, "num_hosts_done": 0, "num_hosts_not_run": 1, "num_hosts_running": 0, "overall_status": "0 of 1 hosts done. 0 errors.", "period_beginning": "%(xmltime)s", "period_ending": "%(xmltime)s", "total_errors": 0, "total_instances": 0 } } ././@LongLink0000000000000000000000000000021000000000000011206 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usag0000664000567000056710000000067413064447152035066 0ustar jenkinsjenkins00000000000000{ "instance_usage_audit_logs": { "hosts_not_run": [ "%(host_name)s" ], "log": {}, "num_hosts": 1, "num_hosts_done": 0, "num_hosts_not_run": 1, "num_hosts_running": 0, "overall_status": "0 of 1 hosts done. 
0 errors.", "period_beginning": "%(xmltime)s", "period_ending": "%(xmltime)s", "total_errors": 0, "total_instances": 0 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/0000775000567000056710000000000013064447471031260 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-get-re0000664000567000056710000000025113064447140035176 0ustar jenkinsjenkins00000000000000{ "server_group": { "id": "%(id)s", "name": "%(name)s", "policies": ["anti-affinity"], "members": [], "metadata": {} } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/0000775000567000056710000000000013064447471032031 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-0000664000567000056710000000037613064447140034653 0ustar jenkinsjenkins00000000000000{ "server_group": { "id": "%(id)s", "name": "%(name)s", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-0000664000567000056710000000037613064447140034653 0ustar jenkinsjenkins00000000000000{ "server_group": { "id": "%(id)s", "name": "%(name)s", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-0000664000567000056710000000014213064447140034642 0ustar jenkinsjenkins00000000000000{ "server_group": { "name": "%(name)s", "policies": ["anti-affinity"] } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-0000664000567000056710000000045213064447140034646 0ustar jenkinsjenkins00000000000000{ "server_groups": [ { "id": "%(id)s", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } ] } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-r0000664000567000056710000000025213064447140035240 0ustar jenkinsjenkins00000000000000{ "server_group": { 
"id": "%(id)s", "name": "%(name)s", "policies": ["anti-affinity"], "members": [], "metadata": {} } } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-post-r0000664000567000056710000000014213064447140035236 0ustar jenkinsjenkins00000000000000{ "server_group": { "name": "%(name)s", "policies": ["anti-affinity"] } } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/server-groups-list-r0000664000567000056710000000031513064447140035226 0ustar jenkinsjenkins00000000000000{ "server_groups": [ { "id": "%(id)s", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {} } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/0000775000567000056710000000000013064447471031477 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-get0000664000567000056710000000046413064447140035053 0ustar jenkinsjenkins00000000000000{ "node": { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "058d27fa-241b-445a-a386-08c04f96db43", "instance_uuid": "1ea4e53e-149a-4f02-9515-590c9fb2315a", "interfaces": [], "memory_mb": "1024", "task_state": "active" } }././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-baremetal-nodes/baremetal-node-lis0000664000567000056710000000106113064447140035055 0ustar jenkinsjenkins00000000000000{ "nodes": [ { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "058d27fa-241b-445a-a386-08c04f96db43", "interfaces": [], "memory_mb": "1024", "task_state": "active" }, { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "e2025409-f3ce-4d6a-9788-c565cf3b1b1c", "interfaces": [], "memory_mb": "1024", "task_state": "active" } ] }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/0000775000567000056710000000000013064447471031536 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-po0000664000567000056710000000057113064447152035175 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-scheduler-hints/scheduler-hints-po0000664000567000056710000000031113064447152035165 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "%(uuid)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cells/0000775000567000056710000000000013064447471027537 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-list-resp.json.tpl0000664000567000056710000000160413064447140034244 0ustar jenkinsjenkins00000000000000{ "cells": [ { "name": "cell1", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username1" }, { "name": "cell3", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username3" }, { "name": "cell5", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username5" }, { "name": "cell2", "rpc_host": null, "rpc_port": null, "type": "parent", "username": "username2" }, { "name": "cell4", "rpc_host": null, "rpc_port": null, "type": "parent", "username": "username4" } ] }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-list-empty-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-list-empty-resp.json.t0000664000567000056710000000002313064447140035036 0ustar jenkinsjenkins00000000000000{ "cells": [] }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-get-resp.json.tpl0000664000567000056710000000023513064447140034047 0ustar jenkinsjenkins00000000000000{ "cell": { "name": "cell3", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username3" } }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-capacities-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cells/cells-capacities-resp.json.t0000664000567000056710000000116213064447140035041 0ustar jenkinsjenkins00000000000000{ "cell": { "capacities": { "disk_free": { "total_mb": 1052672, "units_by_mb": { "0": 0, "163840": 5, "20480": 46, "40960": 23, "81920": 11 } }, "ram_free": { "total_mb": 7680, "units_by_mb": { "16384": 0, "2048": 3, "4096": 1, "512": 13, "8192": 0 } } } } }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/0000775000567000056710000000000013064447471032337 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-console-co0000664000567000056710000000025713064447140035075 0ustar jenkinsjenkins00000000000000{ "console": { "instance_uuid": "%(id)s", "host": "%(host)s", "port": %(port)s, "internal_access_path": "%(internal_access_path)s" } } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-console-auth-tokens/get-rdp-consol0000664000567000056710000000010013064447140035077 0ustar jenkinsjenkins00000000000000{ "os-getRDPConsole": { "type": "rdp-html5" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/0000775000567000056710000000000013064447471030232 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-req.json.0000664000567000056710000000021713064447140035062 0ustar jenkinsjenkins00000000000000{ "evacuate": { "host": "%(host)s", "adminPass": "%(adminPass)s", "onSharedStorage": "%(onSharedStorage)s" } } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host0000664000567000056710000000004413064447140035136 0ustar jenkinsjenkins00000000000000{ "adminPass": "%(password)s" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/0000775000567000056710000000000013064447471031004 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-req0000664000567000056710000000013513064447140034605 0ustar jenkinsjenkins00000000000000{ "evacuate": { "host": "%(host)s", "adminPass": "%(adminPass)s" } } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-find-host-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.14/server-evacuate-fin0000664000567000056710000000010113064447140034563 0ustar jenkinsjenkins00000000000000{ "evacuate": { "adminPass": "%(adminPass)s" } } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-find-host0000664000567000056710000000016313064447140035140 0ustar jenkinsjenkins00000000000000{ "evacuate": { "adminPass": "%(adminPass)s", "onSharedStorage": "%(onSharedStorage)s" } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/server-evacuate-resp.json0000664000567000056710000000004413064447140035164 0ustar jenkinsjenkins00000000000000{ "adminPass": "%(password)s" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/0000775000567000056710000000000013064447471027723 5ustar 
jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-rescue.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-rescue.json0000664000567000056710000000277413064447152035142 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "status": "%(status)s", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "key_name": null } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-unrescue.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-get-resp-unrescue.js0000664000567000056710000000302313064447152035134 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "%(status)s", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "key_name": null } } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-unrescue-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-unrescue-req.json.tp0000664000567000056710000000003013064447140035144 0ustar jenkinsjenkins00000000000000{ "unrescue": null }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue.json.tpl0000664000567000056710000000004413064447140034175 0ustar jenkinsjenkins00000000000000{ "adminPass": "%(password)s" } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue-req-with-imag0000664000567000056710000000020213064447140035074 0ustar jenkinsjenkins00000000000000{ "rescue": { 
"adminPass": "MySecretPass", "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-rescue/server-rescue-req.json.tpl0000664000567000056710000000007613064447140034767 0ustar jenkinsjenkins00000000000000{ "rescue": { "adminPass": "%(password)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/0000775000567000056710000000000013064447471031455 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/restore-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/restore-post-req.j0000664000567000056710000000003013064447140035045 0ustar jenkinsjenkins00000000000000{ "restore": null } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/force-delete-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-deferred-delete/force-delete-post-0000664000567000056710000000003413064447140034764 0ustar jenkinsjenkins00000000000000{ "forceDelete": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/0000775000567000056710000000000013064447471031621 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-req.js0000664000567000056710000000051413064447152035231 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [{"name": "test"}] } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-get-resp.js0000664000567000056710000000310213064447152035201 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "security_groups": [{"name": "test"}], "key_name": null } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-cr0000664000567000056710000000031413064447140035321 0ustar jenkinsjenkins00000000000000{ "security_group": { "description": "%(description)s", "id": 1, "name": "%(group_name)s", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-remove-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-rem0000664000567000056710000000011013064447140035307 0ustar jenkinsjenkins00000000000000{ "removeSecurityGroup": { "name": "%(group_name)s" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-add-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-add0000664000567000056710000000010513064447140035260 0ustar jenkinsjenkins00000000000000{ "addSecurityGroup": { "name": "%(group_name)s" } } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-group-pos0000664000567000056710000000015113064447140035332 0ustar jenkinsjenkins00000000000000{ "security_group": { "name": "%(group_name)s", "description": "description" } } ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-list-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-li0000664000567000056710000000034613064447140035326 0ustar jenkinsjenkins00000000000000{ "security_groups": [ { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } ] } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/security-groups-ge0000664000567000056710000000027513064447140035316 0ustar jenkinsjenkins00000000000000{ "security_group": { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-post-resp.j0000664000567000056710000000065013064447152035231 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "security_groups": [{"name": "test"}] } } 
././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/servers-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/servers-detail-res0000664000567000056710000000344013064447152035261 0ustar jenkinsjenkins00000000000000{ "servers": [ { "updated": "%(isotime)s", "created": "%(isotime)s", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "security_groups": [{"name": "test"}], "key_name": null }] } ././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-security-groups-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-security-groups/server-security-gr0000664000567000056710000000034613064447140035321 0ustar jenkinsjenkins00000000000000{ "security_groups": [ { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/0000775000567000056710000000000013064447471030311 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-req.json.t0000664000567000056710000000044513064447140035161 0ustar jenkinsjenkins00000000000000{ "network": { "label": "new net 111", "cidr": "10.20.105.0/24", "mtu": 9000, "dhcp_server": "10.20.105.2", "enable_dhcp": false, "share_address": true, "allowed_start": "10.20.105.10", "allowed_end": "10.20.105.200" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/network-add-req.json.tpl0000664000567000056710000000002213064447140034771 0ustar jenkinsjenkins00000000000000{ "id": "1" } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/network-create-resp.json.0000664000567000056710000000164613064447140035163 0ustar jenkinsjenkins00000000000000{ "network": { "bridge": null, "vpn_public_port": null, "dhcp_start": "%(ip)s", "bridge_interface": null, "updated_at": null, "id": "%(id)s", "cidr_v6": null, "deleted_at": null, "gateway": "%(ip)s", "rxtx_base": null, "label": "new net 111", "priority": null, "project_id": null, "vpn_private_address": null, "deleted": null, "vlan": null, "broadcast": "%(ip)s", "netmask": 
"%(ip)s", "injected": null, "cidr": "10.20.105.0/24", "vpn_public_address": null, "multi_host": null, "dns2": null, "created_at": null, "host": null, "gateway_v6": null, "netmask_v6": null, "dns1": null, "mtu": 9000, "dhcp_server": "10.20.105.2", "enable_dhcp": false, "share_address": true } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-list-resp.json.t0000664000567000056710000000421113064447152035234 0ustar jenkinsjenkins00000000000000{ "networks": [ { "bridge": "br100", "bridge_interface": "eth0", "broadcast": "%(ip)s", "cidr": "10.0.0.0/29", "cidr_v6": null, "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "dhcp_start": "%(ip)s", "dns1": null, "dns2": null, "gateway": "%(ip)s", "gateway_v6": null, "host": "nsokolov-desktop", "id": "%(id)s", "injected": false, "label": "mynet_0", "multi_host": false, "netmask": "%(ip)s", "netmask_v6": null, "priority": null, "project_id": "1234", "rxtx_base": null, "updated_at": "%(strtime)s", "vlan": 100, "vpn_private_address": "%(ip)s", "vpn_public_address": "%(ip)s", "vpn_public_port": 1000, "mtu": null, "dhcp_server": "%(ip)s", "enable_dhcp": true, "share_address": false }, { "bridge": "br101", "bridge_interface": "eth0", "broadcast": "%(ip)s", "cidr": "10.0.0.10/29", "cidr_v6": null, "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "dhcp_start": "%(ip)s", "dns1": null, "dns2": null, "gateway": "%(ip)s", "gateway_v6": null, "host": null, "id": "%(id)s", "injected": false, "label": "mynet_1", "multi_host": false, "netmask": "%(ip)s", "netmask_v6": null, "priority": null, "project_id": null, "rxtx_base": null, "updated_at": null, "vlan": 101, "vpn_private_address": "%(ip)s", "vpn_public_address": null, "vpn_public_port": 1001, "mtu": null, "dhcp_server": "%(ip)s", "enable_dhcp": true, "share_address": false } ] } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/network-show-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/network-show-resp.json.tp0000664000567000056710000000212713064447152035242 0ustar jenkinsjenkins00000000000000{ "network": { "bridge": "br100", "bridge_interface": "eth0", "broadcast": "%(ip)s", "cidr": "10.0.0.0/29", "cidr_v6": null, "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "dhcp_start": "%(ip)s", "dns1": null, "dns2": null, "gateway": "%(ip)s", "gateway_v6": null, "host": "nsokolov-desktop", "id": "%(id)s", "injected": false, "label": "mynet_0", "multi_host": false, "netmask": "%(ip)s", "netmask_v6": null, "priority": null, "project_id": "1234", "rxtx_base": null, "updated_at": "%(strtime)s", "vlan": 100, "vpn_private_address": "%(ip)s", "vpn_public_address": "%(ip)s", "vpn_public_port": 1000, "mtu": null, "dhcp_server": "%(ip)s", "enable_dhcp": true, "share_address": false } } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-disassociate-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks/networks-disassociate-req0000664000567000056710000000003513064447152035340 0ustar jenkinsjenkins00000000000000{ "disassociate": null } 
nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-certificates/0000775000567000056710000000000013064447471031102 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-re0000664000567000056710000000000013064447140035133 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-get-root-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-get-root-0000664000567000056710000000012713064447140035113 0ustar jenkinsjenkins00000000000000{ "certificate": { "data": "%(text)s", "private_key": null } } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-certificates/certificate-create-re0000664000567000056710000000013513064447140035144 0ustar jenkinsjenkins00000000000000{ "certificate": { "data": "%(text)s", "private_key": "%(text)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-create-backup/0000775000567000056710000000000013064447471031143 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-create-backup/create-backup-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-create-backup/create-backup-req.js0000664000567000056710000000016213064447140034764 0ustar jenkinsjenkins00000000000000{ "createBackup": { "name": "Backup 1", "backup_type": "daily", "rotation": 1 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/0000775000567000056710000000000013064447471031764 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bul0000664000567000056710000000020513064447140035051 0ustar jenkinsjenkins00000000000000{ "floating_ips_bulk_create": { "interface": "eth0", "ip_range": "192.168.1.0/24", "pool": "nova" } }././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bul0000664000567000056710000000037213064447140035056 0ustar jenkinsjenkins00000000000000{ "floating_ip_info": [ { "address": "10.10.10.3", "instance_uuid": null, "interface": "eth0", "pool": "nova", "fixed_ip": null, "project_id": null } ] } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bul0000664000567000056710000000004313064447140035051 0ustar jenkinsjenkins00000000000000{ "ip_range": "%(ip_range)s" } ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bul0000664000567000056710000000125013064447140035052 0ustar jenkinsjenkins00000000000000{ "floating_ip_info": [ { "address": "10.10.10.1", "instance_uuid": null, "interface": "eth0", "pool": "nova", "fixed_ip": null, "project_id": null }, { "address": "10.10.10.2", "instance_uuid": null, "interface": "eth0", "pool": "nova", "fixed_ip": null, "project_id": null }, { "address": "10.10.10.3", "instance_uuid": null, "interface": "eth0", "pool": "nova", "fixed_ip": null, "project_id": null } ] } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bul0000664000567000056710000000022113064447140035047 0ustar jenkinsjenkins00000000000000{ "floating_ips_bulk_create": { "ip_range": "%(ip_range)s", "pool": "%(pool)s", "interface": "%(interface)s" } } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips-bulk/floating-ips-bul0000664000567000056710000000006413064447140035054 0ustar jenkinsjenkins00000000000000{ "floating_ips_bulk_delete": "192.168.1.0/24" }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/0000775000567000056710000000000013064447471031531 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-po0000664000567000056710000000005713064447140035157 0ustar jenkinsjenkins00000000000000{ "reservation_id": "%(reservation_id)s" } ././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no0000664000567000056710000000041713064447140035155 0ustar jenkinsjenkins00000000000000{ "server": { "name": "new-server-test", "imageRef": "%(image_id)s", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "min_count": "%(min_count)s", "max_count": "%(max_count)s" } } ././@LongLink0000000000000000000000000000017500000000000011220 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-no0000664000567000056710000000057113064447152035161 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multiple-create/multiple-create-po0000664000567000056710000000047013064447140035156 0ustar jenkinsjenkins00000000000000{ "server": { "name": "new-server-test", "imageRef": "%(image_id)s", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "return_reservation_id": "True", "min_count": "%(min_count)s", "max_count": "%(max_count)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-ips/0000775000567000056710000000000013064447471030115 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-network-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-network-resp.js0000664000567000056710000000014613064447140035202 0ustar jenkinsjenkins00000000000000{ "private": [ { "addr": "%(ip)s", "version": 4 } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-resp.json.tpl0000664000567000056710000000022713064447140034646 0ustar jenkinsjenkins00000000000000{ "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/0000775000567000056710000000000013064447471030260 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/0000775000567000056710000000000013064447471031027 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put0000664000567000056710000000016213064447140034611 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "disabled" } }././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log0000664000567000056710000000014513064447140034563 0ustar jenkinsjenkins00000000000000{ "host": "%(host)s", "binary": "%(binary)s", "disabled_reason": "%(disabled_reason)s" } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/services-list-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/services-list-get-r0000664000567000056710000000240713064447140034556 0ustar jenkinsjenkins00000000000000{ "services": [ { "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host1", "id": 1, "state": "up", "status": "disabled", "updated_at": "%(strtime)s", "forced_down": false, "zone": "internal" }, { "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "id": 2, "state": "up", "status": "disabled", "updated_at": "%(strtime)s", "forced_down": false, "zone": "nova" }, { "binary": "nova-scheduler", "disabled_reason": null, "host": "host2", "id": 3, "state": "down", "status": "enabled", "updated_at": "%(strtime)s", "forced_down": false, "zone": "internal" }, { "binary": "nova-compute", "disabled_reason": "test4", "host": "host2", "id": 4, "state": "down", "status": "disabled", "updated_at": "%(strtime)s", "forced_down": false, "zone": "nova" } ] } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-log0000664000567000056710000000022613064447140034563 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "status": "disabled" } }././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-0000664000567000056710000000016213064447140034520 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "forced_down": true } } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-disable-put0000664000567000056710000000006713064447140034615 0ustar jenkinsjenkins00000000000000{ "host": "%(host)s", "binary": "%(binary)s" } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-0000664000567000056710000000006713064447140034515 0ustar jenkinsjenkins00000000000000{ "host": "%(host)s", "binary": "%(binary)s" } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-enable-put-0000664000567000056710000000016113064447140034510 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "enabled" } }././@LongLink0000000000000000000000000000016500000000000011217 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/v2.11/service-force-down-0000664000567000056710000000013313064447140034516 0ustar jenkinsjenkins00000000000000{ "host": "%(host)s", "binary": "%(binary)s", "forced_down": %(forced_down)s } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-resp.0000664000567000056710000000016213064447140035067 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "disabled" } }././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-r0000664000567000056710000000014513064447140035061 0ustar jenkinsjenkins00000000000000{ "host": "%(host)s", "binary": "%(binary)s", "disabled_reason": "%(disabled_reason)s" } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/services-list-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/services-list-get-resp.js0000664000567000056710000000217713064447140035136 0ustar jenkinsjenkins00000000000000{ "services": [ { "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host1", "id": 1, "state": "up", "status": "disabled", "updated_at": "%(strtime)s", "zone": "internal" }, { "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "id": 2, "state": "up", "status": "disabled", "updated_at": "%(strtime)s", "zone": "nova" }, { "binary": "nova-scheduler", "disabled_reason": null, "host": "host2", "id": 3, "state": "down", "status": "enabled", "updated_at": "%(strtime)s", "zone": "internal" }, { "binary": "nova-compute", "disabled_reason": "test4", "host": "host2", "id": 4, "state": "down", "status": "disabled", "updated_at": "%(strtime)s", "zone": "nova" } ] } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-log-put-r0000664000567000056710000000022613064447140035061 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "status": "disabled" } }././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-disable-put-req.j0000664000567000056710000000006713064447140035063 0ustar jenkinsjenkins00000000000000{ "host": "%(host)s", "binary": "%(binary)s" } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-req.js0000664000567000056710000000006713064447140035071 0ustar jenkinsjenkins00000000000000{ "host": "%(host)s", "binary": "%(binary)s" } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-services/service-enable-put-resp.j0000664000567000056710000000016113064447140035063 0ustar jenkinsjenkins00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "enabled" } }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/0000775000567000056710000000000013064447471030127 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/list-volume-attachments-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/list-volume-attachments-re0000664000567000056710000000071413064447140035242 0ustar jenkinsjenkins00000000000000{ "volumeAttachments": [ { "device": "/dev/sdd", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "%(uuid)s", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" }, { "device": "/dev/sdc", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804", "serverId": "%(uuid)s", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804" } ] } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-re0000664000567000056710000000015013064447140035140 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "volumeId": "%(volume_id)s", "device": "%(device)s" } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-show-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-show-resp.json.t0000664000567000056710000000037513064447140035231 0ustar jenkinsjenkins00000000000000{ "snapshot": { "createdAt": "%(strtime)s", "displayDescription": "%(description)s", "displayName": "%(snapshot_name)s", "id": "100", "size": 100, "status": "available", "volumeId": 12 } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-index-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-index-resp.json0000664000567000056710000000121713064447140035201 0ustar jenkinsjenkins00000000000000{ "volumes": [ { "attachments": [ { "device": "/", "id": "%(uuid)s", "serverId": "%(uuid)s", "volumeId": "%(uuid)s" } ], "availabilityZone": "zone1:host1", "createdAt": "%(strtime)s", "displayDescription": "%(volume_desc)s", "displayName": "%(volume_name)s", "id": "%(uuid)s", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "Backup" } ] } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-list-resp.json.t0000664000567000056710000000143413064447140035221 0ustar jenkinsjenkins00000000000000{ "snapshots": [ { "createdAt": "%(strtime)s", "displayDescription": "%(text)s", "displayName": "%(text)s", "id": 100, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": "%(strtime)s", "displayDescription": "%(text)s", "displayName": "%(text)s", "id": 101, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": "%(strtime)s", "displayDescription": "%(text)s", "displayName": "%(text)s", "id": 102, "size": 100, "status": "available", "volumeId": 12 } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/update-volume-req.json.tpl0000664000567000056710000000011013064447140035155 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "volumeId": "%(volume_id)s" } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-detail-resp.jso0000664000567000056710000000121713064447140035156 0ustar jenkinsjenkins00000000000000{ "volumes": [ { "attachments": [ { "device": "/", "id": "%(uuid)s", "serverId": "%(uuid)s", "volumeId": "%(uuid)s" } ], "availabilityZone": "zone1:host1", "createdAt": "%(strtime)s", "displayDescription": "%(volume_desc)s", "displayName": "%(volume_name)s", "id": "%(uuid)s", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "Backup" } ] } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshots-detail-resp.json0000664000567000056710000000151113064447140035242 0ustar jenkinsjenkins00000000000000{ "snapshots": [ { "createdAt": "%(strtime)s", "displayDescription": "Default description", "displayName": "Default name", "id": 100, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": "%(strtime)s", "displayDescription": "Default description", "displayName": "Default name", "id": 101, "size": 100, "status": "available", "volumeId": 12 }, { "createdAt": "%(strtime)s", "displayDescription": "Default description", "displayName": "Default name", "id": 102, "size": 100, "status": "available", "volumeId": 12 } ] } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/attach-volume-to-server-re0000664000567000056710000000024713064447140035147 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "device": "%(device)s", "id": "%(volume_id)s", "serverId": "%(uuid)s", "volumeId": "%(volume_id)s" } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-resp.json.0000664000567000056710000000101313064447140035127 0ustar 
jenkinsjenkins00000000000000{ "volume": { "status": "in-use", "displayDescription": "%(volume_desc)s", "availabilityZone": "zone1:host1", "displayName": "%(volume_name)s", "attachments": [ { "device": "/", "serverId": "%(uuid)s", "id": "%(uuid)s", "volumeId": "%(uuid)s" } ], "volumeType": "Backup", "snapshotId": null, "metadata": {}, "id": "%(uuid)s", "createdAt": "%(strtime)s", "size": 100 } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-get-resp.json.t0000664000567000056710000000106213064447140035111 0ustar jenkinsjenkins00000000000000{ "volume": { "attachments": [ { "device": "/", "id": "%(uuid)s", "serverId": "%(uuid)s", "volumeId": "%(uuid)s" } ], "availabilityZone": "zone1:host1", "createdAt": "%(strtime)s", "displayDescription": "%(volume_desc)s", "displayName": "%(volume_name)s", "id": "%(uuid)s", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "Backup" } } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/volume-attachment-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/volume-attachment-detail-r0000664000567000056710000000032313064447140035175 0ustar jenkinsjenkins00000000000000{ "volumeAttachment": { "device": "/dev/sdd", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "%(uuid)s", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-req.json.t0000664000567000056710000000027113064447140035142 0ustar jenkinsjenkins00000000000000{ "snapshot": { "display_name": "%(snapshot_name)s", "display_description": "%(description)s", "volume_id": "%(volume_id)s", "force": false } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/snapshot-create-resp.json.0000664000567000056710000000040313064447140035135 0ustar jenkinsjenkins00000000000000{ "snapshot": { "createdAt": "%(strtime)s", "displayDescription": "%(description)s", "displayName": "%(snapshot_name)s", "id": 100, "size": 100, "status": "available", "volumeId": "%(uuid)s" } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-volumes/os-volumes-post-req.json.t0000664000567000056710000000026713064447140035143 0ustar jenkinsjenkins00000000000000{ "volume": { "availability_zone": "zone1:host1", "display_name": "%(volume_name)s", "display_description": "%(volume_desc)s", "size": 100 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-lock-server/0000775000567000056710000000000013064447471030671 5ustar 
jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-lock-server/lock-server.json.tpl0000664000567000056710000000002513064447140034604 0ustar jenkinsjenkins00000000000000{ "lock": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-lock-server/unlock-server.json.tpl0000664000567000056710000000002713064447140035151 0ustar jenkinsjenkins00000000000000{ "unlock": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/0000775000567000056710000000000013064447471033725 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/server-0000664000567000056710000000332113064447152035226 0ustar jenkinsjenkins00000000000000{ "server": { "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/0000775000567000056710000000000013064447471034501 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/s0000664000567000056710000000414313064447152034664 0ustar jenkinsjenkins00000000000000{ "server": { "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:reservation_id": "%(reservation_id)s", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:kernel_id": null, "OS-EXT-SRV-ATTR:ramdisk_id": null, "OS-EXT-SRV-ATTR:user_data": null, "locked": false, "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": 
"%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null } } ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/s0000664000567000056710000000457013064447152034670 0ustar jenkinsjenkins00000000000000{ "servers": [ { "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:reservation_id": "%(reservation_id)s", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:kernel_id": null, "OS-EXT-SRV-ATTR:ramdisk_id": null, "OS-EXT-SRV-ATTR:user_data": null, "locked": false, "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null } ] } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/servers-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/servers0000664000567000056710000000370213064447152035337 0ustar jenkinsjenkins00000000000000{ "servers": [ { "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": 
"Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/0000775000567000056710000000000013064447471032060 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zon0000664000567000056710000000030213064447140035245 0ustar jenkinsjenkins00000000000000{ "availabilityZoneInfo": [ { "hosts": null, "zoneName": "nova", "zoneState": { "available": true } } ] } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zon0000664000567000056710000000314213064447140035252 0ustar jenkinsjenkins00000000000000{ "availabilityZoneInfo": [ { "hosts": { "consoleauth": { "nova-consoleauth": { "active": true, "available": true, "updated_at": %(strtime_or_none)s } }, "conductor": { "nova-conductor": { "active": true, "available": true, "updated_at": %(strtime_or_none)s } }, "scheduler": { "nova-scheduler": { "active": true, "available": true, "updated_at": %(strtime_or_none)s } }, "network": { "nova-network": { "active": true, "available": true, "updated_at": %(strtime_or_none)s } } }, "zoneName": "internal", "zoneState": { "available": true } }, { "hosts": { "compute": { "nova-compute": { "active": true, "available": true, "updated_at": %(strtime_or_none)s } } }, "zoneName": "nova", "zoneState": { "available": true } } ] } ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zon0000664000567000056710000000057113064447152035260 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zone-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-availability-zone/availability-zon0000664000567000056710000000145413064447140035256 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" }, "availability_zone": "%(availability_zone)s", "personality" : [ { "path" : "/etc/banner.txt", "contents" : 
"ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/0000775000567000056710000000000013064447471027645 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/0000775000567000056710000000000013064447471030334 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.0000664000567000056710000000052713064447140034655 0ustar jenkinsjenkins00000000000000{ "keypair": { "public_key": "%(public_key)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "fingerprint": "%(fingerprint)s", "user_id": "fake", "deleted": false, "created_at": "%(strtime)s", "updated_at": null, "deleted_at": null, "id": 1 } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.0000664000567000056710000000014213064447140034672 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "%(keypair_name)s", "type": "%(keypair_type)s" } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json0000664000567000056710000000037113064447140035002 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "private_key": "%(private_key)s", "public_key": "%(public_key)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-re0000664000567000056710000000021213064447140034771 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "%(keypair_name)s", "type": "%(keypair_type)s", "public_key": "%(public_key)s" } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json0000664000567000056710000000042013064447140034763 0ustar jenkinsjenkins00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "public_key": "%(public_key)s" } } ] } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-re0000664000567000056710000000031713064447140034777 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "public_key": "%(public_key)s", "user_id": "fake" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl0000664000567000056710000000046313064447140034705 0ustar jenkinsjenkins00000000000000{ "keypair": { "public_key": "%(public_key)s", "name": "%(keypair_name)s", "fingerprint": "%(fingerprint)s", "user_id": "fake", "deleted": false, "created_at": "%(strtime)s", "updated_at": null, "deleted_at": null, "id": 1 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl0000664000567000056710000000007613064447140034731 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "%(keypair_name)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/0000775000567000056710000000000013064447471030413 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json0000664000567000056710000000053613064447140034656 0ustar jenkinsjenkins00000000000000{ "keypair": { "public_key": "%(public_key)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "fingerprint": "%(fingerprint)s", "user_id": "%(user_id)s", "deleted": false, "created_at": "%(strtime)s", "updated_at": null, "deleted_at": null, "id": 1 } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json0000664000567000056710000000020413064447140034672 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "%(keypair_name)s", "type": "%(keypair_type)s", "user_id": "%(user_id)s" } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.jso0000664000567000056710000000040013064447140034674 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "private_key": "%(private_key)s", "public_key": "%(public_key)s", "user_id": "%(user_id)s" } } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-r0000664000567000056710000000025413064447140034711 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "%(keypair_name)s", "type": "%(keypair_type)s", "public_key": "%(public_key)s", "user_id": "%(user_id)s" } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.jso0000664000567000056710000000042013064447140034664 0ustar jenkinsjenkins00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "public_key": "%(public_key)s" } } ] } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-r0000664000567000056710000000032613064447140034711 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "type": "%(keypair_type)s", "public_key": "%(public_key)s", "user_id": "%(user_id)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl0000664000567000056710000000032513064447140035110 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "private_key": "%(private_key)s", "public_key": "%(public_key)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.jso0000664000567000056710000000014613064447140035263 0ustar jenkinsjenkins00000000000000{ "keypair": { "name": "%(keypair_name)s", "public_key": "%(public_key)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl0000664000567000056710000000034413064447140035077 0ustar jenkinsjenkins00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "public_key": "%(public_key)s" } } ] } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.js0000664000567000056710000000025313064447140035265 0ustar jenkinsjenkins00000000000000{ "keypair": { "fingerprint": "%(fingerprint)s", "name": "%(keypair_name)s", "public_key": "%(public_key)s", "user_id": "fake" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/0000775000567000056710000000000013064447471032244 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/v2.12/0000775000567000056710000000000013064447471033014 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/v2.12/vifs-list0000664000567000056710000000024013064447140034644 0ustar jenkinsjenkins00000000000000{ "virtual_interfaces": [ { "id": "%(id)s", "mac_address": "%(mac_addr)s", "net_id": "%(id)s" } ] } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp.0000664000567000056710000000017713064447140035132 0ustar jenkinsjenkins00000000000000{ "virtual_interfaces": [ { "id": "%(id)s", "mac_address": "%(mac_addr)s" } ] }././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp-v2.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-virtual-interfaces/vifs-list-resp-0000664000567000056710000000025713064447140035130 0ustar jenkinsjenkins00000000000000{ "virtual_interfaces": [ { "id": "%(id)s", "mac_address": "%(mac_addr)s", "OS-EXT-VIF-NET:net_id": "%(id)s" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-password/0000775000567000056710000000000013064447471031365 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-password/admin-password-change-password.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-password/admin-password-chan0000664000567000056710000000011013064447140035130 0ustar jenkinsjenkins00000000000000{ "changePassword" : { "adminPass" : "%(password)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/0000775000567000056710000000000013064447471027716 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-req.json.t0000664000567000056710000000016513064447140035017 0ustar jenkinsjenkins00000000000000{ "para": { "url": "%(url)s", "md5hash": "%(md5hash)s", "version": "%(version)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-post-req.json.tpl0000664000567000056710000000034213064447140034245 0ustar jenkinsjenkins00000000000000{ "agent": { "hypervisor": "%(hypervisor)s", "os": "%(os)s", "architecture": "%(architecture)s", "version": "%(version)s", "md5hash": "%(md5hash)s", "url": "%(url)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-post-resp.json.tpl0000664000567000056710000000041513064447152034433 0ustar jenkinsjenkins00000000000000{ "agent": { "agent_id": 1, "architecture": "x86", "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", "url": "http://example.com/path/to/resource", "version": "8.0" } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/agent-update-put-resp.json.0000664000567000056710000000027013064447140035012 0ustar jenkinsjenkins00000000000000{ "agent": { "agent_id": "1", "md5hash": "add6bb58e139be103324d04d82d8f545", "url": "http://example.com/path/to/resource", "version": "7.0" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-agents/agents-get-resp.json.tpl0000664000567000056710000000047613064447152034417 0ustar jenkinsjenkins00000000000000{ "agents": 
[ { "agent_id": 1, "architecture": "x86", "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", "url": "http://example.com/path/to/resource", "version": "8.0" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/0000775000567000056710000000000013064447471031543 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs0000664000567000056710000000003513064447140035202 0ustar jenkinsjenkins00000000000000{ "key1": "%(value1)s" } ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs0000664000567000056710000000013213064447140035200 0ustar jenkinsjenkins00000000000000{ "extra_specs": { "key1": "%(value1)s", "key2": "%(value2)s" } } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs0000664000567000056710000000013213064447140035200 0ustar jenkinsjenkins00000000000000{ "extra_specs": { "key1": "%(value1)s", "key2": "%(value2)s" } } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs0000664000567000056710000000013213064447140035200 0ustar jenkinsjenkins00000000000000{ "extra_specs": { "key1": "%(value1)s", "key2": "%(value2)s" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs0000664000567000056710000000003513064447140035202 0ustar jenkinsjenkins00000000000000{ "key1": "%(value1)s" } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs0000664000567000056710000000003513064447140035202 0ustar jenkinsjenkins00000000000000{ "key1": "%(value1)s" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/0000775000567000056710000000000013064447471031371 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/live-migrate-server.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/live-migrate-server0000664000567000056710000000020613064447140035174 0ustar jenkinsjenkins00000000000000{ "os-migrateLive": { "host": 
"%(hostname)s", "block_migration": false, "disk_over_commit": false } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/migrate-server.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/migrate-server.json0000664000567000056710000000003013064447140035202 0ustar jenkinsjenkins00000000000000{ "migrate": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/v2.25/0000775000567000056710000000000013064447471032145 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/v2.25/live-migrate-server.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrate-server/v2.25/live-migrate-0000664000567000056710000000014413064447140034522 0ustar jenkinsjenkins00000000000000{ "os-migrateLive": { "host": "%(hostname)s", "block_migration": "auto" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multinic/0000775000567000056710000000000013064447471030261 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-remove-fixed-ip-0000664000567000056710000000010013064447140035063 0ustar jenkinsjenkins00000000000000{ "removeFixedIp":{ "address": "%(ip)s" } } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-multinic/multinic-add-fixed-ip-req0000664000567000056710000000010113064447140035027 0ustar jenkinsjenkins00000000000000{ "addFixedIp": { "networkId": %(networkId)s } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/0000775000567000056710000000000013064447471031011 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-req.json.0000664000567000056710000000043513064447152035036 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-config-drive-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-config-drive-g0000664000567000056710000000307113064447152035035 0ustar jenkinsjenkins00000000000000{ "server": { "config_drive": "%(cdrive)s", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ 
{ "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "key_name": null } } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/servers-config-drive-details-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/servers-config-drive-0000664000567000056710000000344413064447152035055 0ustar jenkinsjenkins00000000000000{ "servers": [ { "config_drive": "%(cdrive)s", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "key_name": null } ] } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-config-drive/server-post-resp.json0000664000567000056710000000057113064447152035143 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/0000775000567000056710000000000013064447471033601 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000020300000000000011210 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot0000664000567000056710000000011613064447152035355 0ustar jenkinsjenkins00000000000000{ "snapshot": { "id": 100, "volumeId": "%(uuid)s" } } ././@LongLink0000000000000000000000000000020200000000000011207 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-assisted-volume-snapshots/snapshot0000664000567000056710000000033213064447152035355 0ustar jenkinsjenkins00000000000000{ 
"snapshot": { "volume_id": "%(volume_id)s", "create_info": { "snapshot_id": "%(snapshot_id)s", "type": "%(type)s", "new_file": "%(new_file)s" } } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-password/0000775000567000056710000000000013064447471031603 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-password/get-password-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-password/get-password-resp.0000664000567000056710000000005513064447140035163 0ustar jenkinsjenkins00000000000000{ "password": "%(encrypted_password)s" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/extension-info/0000775000567000056710000000000013064447471030763 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp-v2.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp-v20000664000567000056710000000037113064447152035153 0ustar jenkinsjenkins00000000000000{ "extension": { "alias": "os-agents", "description": "%(text)s", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2", "updated": "%(isotime)s" } }././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/extension-info/extensions-get-resp.js0000664000567000056710000000040413064447140035233 0ustar jenkinsjenkins00000000000000{ "extension": { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/0000775000567000056710000000000013064447471031725 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/server-get-resp.j0000664000567000056710000000322313064447152035126 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1"}, {"id": "volume_id2"} ], 
"key_name": null } } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/servers-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-volumes/servers-detail-re0000664000567000056710000000357513064447152035213 0ustar jenkinsjenkins00000000000000{ "servers": [ { "updated": "%(isotime)s", "created": "%(isotime)s", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1"}, {"id": "volume_id2"} ], "key_name": null }] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/0000775000567000056710000000000013064447471030731 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.0000664000567000056710000000116413064447152035156 0ustar jenkinsjenkins00000000000000{ "flavor": { "disk": 10, "id": "%(flavor_id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/%(flavor_id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/%(flavor_id)s", "rel": "bookmark" } ], "name": "%(flavor_name)s", "os-flavor-access:is_public": true, "ram": 1024, "rxtx_factor": 2.0, "vcpus": 2, "OS-FLV-DISABLED:disabled": false, "OS-FLV-EXT-DATA:ephemeral": 0, "swap": "" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.j0000664000567000056710000000026413064447152035146 0ustar jenkinsjenkins00000000000000{ "flavor": { "name": "%(flavor_name)s", "ram": 1024, "vcpus": 2, "disk": 10, "id": "%(flavor_id)s", "rxtx_factor": 2.0 } } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.0000664000567000056710000000637713064447152035157 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "rxtx_factor": 
1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "2", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/2", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "os-flavor-access:is_public": true, "ram": 2048, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "3", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/3", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "os-flavor-access:is_public": true, "ram": 4096, "rxtx_factor": 1.0, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "4", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/4", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "os-flavor-access:is_public": true, "ram": 8192, "rxtx_factor": 1.0, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "5", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/5", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "os-flavor-access:is_public": true, "ram": 16384, "rxtx_factor": 1.0, "swap": "", "vcpus": 8 } ] } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.j0000664000567000056710000000110613064447152035116 0ustar jenkinsjenkins00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "rxtx_factor": 1.0, "swap": "", "vcpus": 1 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-used-limits/0000775000567000056710000000000013064447471030674 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-used-limits/v2-usedlimits-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-used-limits/v2-usedlimits-get-resp0000664000567000056710000000560613064447152035055 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalSecurityGroupsUsed": 0, "totalFloatingIpsUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [ { "limit": [ { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" }, { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "PUT" }, { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", 
"value": 120, "verb": "DELETE" } ], "regex": ".*", "uri": "*" }, { "limit": [ { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" } ], "regex": "^/servers", "uri": "*/servers" }, { "limit": [ { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "GET" } ], "regex": ".*changes-since.*", "uri": "*changes-since*" }, { "limit": [ { "next-available": "%(isotime)s", "remaining": 12, "unit": "MINUTE", "value": 12, "verb": "GET" } ], "regex": "^/os-fping", "uri": "*/os-fping" } ] } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-used-limits/usedlimits-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-used-limits/usedlimits-get-resp.js0000664000567000056710000000141213064447152035132 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalSecurityGroupsUsed": 0, "totalFloatingIpsUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-manage/0000775000567000056710000000000013064447471030535 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-resp0000664000567000056710000000113013064447152035133 0ustar jenkinsjenkins00000000000000{ "flavor": { "disk": 10, "id": "%(flavor_id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/%(flavor_id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/%(flavor_id)s", "rel": "bookmark" } ], "name": "%(flavor_name)s", "os-flavor-access:is_public": true, "ram": 1024, "vcpus": 2, "OS-FLV-DISABLED:disabled": false, "OS-FLV-EXT-DATA:ephemeral": 0, "swap": "" } } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavor-manage/flavor-create-post-req.0000664000567000056710000000023013064447152035027 0ustar jenkinsjenkins00000000000000{ "flavor": { "name": "%(flavor_name)s", "ram": 1024, "vcpus": 2, "disk": 10, "id": "%(flavor_id)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/0000775000567000056710000000000013064447471032022 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interface0000664000567000056710000000062213064447140035140 0ustar jenkinsjenkins00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": 
"f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } }././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interface0000664000567000056710000000071713064447140035145 0ustar jenkinsjenkins00000000000000{ "interfaceAttachments": [ { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } ] }././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interface0000664000567000056710000000014113064447140035134 0ustar jenkinsjenkins00000000000000{ "interfaceAttachment": { "port_id": "ce531f90-199f-48c0-816c-13e38010b442" } } ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-attach-interfaces/attach-interface0000664000567000056710000000062213064447140035140 0ustar jenkinsjenkins00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/0000775000567000056710000000000013064447471027507 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-create-image.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-create-image.js0000664000567000056710000000020113064447140035131 0ustar jenkinsjenkins00000000000000{ "createImage" : { "name" : "%(name)s", "metadata": { "meta_var": "meta_val" } } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-post-req.json.tpl0000664000567000056710000000043513064447152034254 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-get-resp.json.tpl0000664000567000056710000000302313064447152034224 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", 
"rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-req.json.tpl0000664000567000056710000000043513064447152034532 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-reboot.json.tpl0000664000567000056710000000006713064447140035245 0ustar jenkinsjenkins00000000000000{ "reboot" : { "type" : "%(type)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/0000775000567000056710000000000013064447471030205 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/server-get-resp.json.tpl0000664000567000056710000000305413064447152034726 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "locked": false } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-details-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-details-resp.jso0000664000567000056710000000342713064447152035007 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, 
"name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "locked": false } ] } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.9/servers-list-resp.json.t0000664000567000056710000000067013064447140034747 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "name": "new-server-test" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/0000775000567000056710000000000013064447471030264 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-crash-dump.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-c0000664000567000056710000000004313064447140034637 0ustar jenkinsjenkins00000000000000{ "trigger_crash_dump": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-start.json.tpl0000664000567000056710000000003413064447140035102 0ustar jenkinsjenkins00000000000000{ "%(action)s" : null } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild-resp.js0000664000567000056710000000264013064447152035217 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] }, "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "%(name)s", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/servers-details-resp.json.tpl0000664000567000056710000000337213064447152035264 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": 
"self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-update-req.json.tpl0000664000567000056710000000026613064447140034550 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "OS-DCF:diskConfig": "AUTO", "name" : "new-server-test" } } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-confirm-resize.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-confirm-resize.0000664000567000056710000000003713064447140035214 0ustar jenkinsjenkins00000000000000{ "confirmResize" : null } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-rebuild.json.tp0000664000567000056710000000042713064447152035230 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "accessIPv4" : "%(access_ip_v4)s", "accessIPv6" : "%(access_ip_v6)s", "imageRef" : "%(uuid)s", "name" : "%(name)s", "adminPass" : "%(pass)s", "metadata" : { "meta_var" : "meta_val" } } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/servers-list-resp.json.tpl0000664000567000056710000000067013064447140034605 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "name": "new-server-test" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-stop.json.tpl0000664000567000056710000000003413064447140034732 0ustar jenkinsjenkins00000000000000{ "%(action)s" : null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-update-resp.json.tpl0000664000567000056710000000265313064447140034734 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-revert-resize.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-revert-resize.j0000664000567000056710000000003613064447140035237 0ustar jenkinsjenkins00000000000000{ "revertResize" : null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-post-resp.json.tpl0000664000567000056710000000057113064447152034437 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-action-resize.json.tpl0000664000567000056710000000007213064447152035253 0ustar jenkinsjenkins00000000000000{ "resize" : { "flavorRef" : "%(id)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/0000775000567000056710000000000013064447471030266 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tp0000664000567000056710000000056713064447152034665 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "name" : "new-server-test", "description" : "new-server-description", "imageRef" : "%(glance_host)s/images/%(image_id)s", "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } } } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tp0000664000567000056710000000313513064447152034633 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "description": "new-server-description", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "locked": false } } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-resp.json.tp0000664000567000056710000000272713064447152034672 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": 
"%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "updated-server-test", "description": "updated-server-description", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "locked": false } } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-r0000664000567000056710000000274313064447152034657 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] }, "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "%(name)s", "description": "%(description)s", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.js0000664000567000056710000000351413064447152034706 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "description": "new-server-description", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "locked": false } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-req.json.tpl0000664000567000056710000000017013064447140034647 0ustar 
jenkinsjenkins00000000000000{ "server" : { "name" : "updated-server-test", "description" : "updated-server-description" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.j0000664000567000056710000000050213064447140034634 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "accessIPv4" : "%(access_ip_v4)s", "accessIPv6" : "%(access_ip_v6)s", "imageRef" : "%(uuid)s", "name" : "%(name)s", "description" : "%(description)s", "adminPass" : "%(pass)s", "metadata" : { "meta_var" : "meta_val" } } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.0000664000567000056710000000067013064447140034644 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "name": "new-server-test" } ] } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.t0000664000567000056710000000057113064447152034662 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-resp.json.tpl0000664000567000056710000000100413064447140034702 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/0000775000567000056710000000000013064447471030325 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedips-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedips-get-resp.json.t0000664000567000056710000000023113064447140035006 0ustar jenkinsjenkins00000000000000{ "fixed_ip": { "cidr": "%(cidr)s", "hostname": "%(hostname)s", "host": "%(host)s", "address": "%(address)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/0000775000567000056710000000000013064447471031016 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.j0000664000567000056710000000027313064447140034523 0ustar jenkinsjenkins00000000000000{ "fixed_ip": { "cidr": "%(cidr)s", "hostname": "%(hostname)s", "host": "%(host)s", "address": "%(address)s", "reserved": %(reserved)s } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedip-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/v2.4/fixedip-post-req.js0000664000567000056710000000003013064447140034536 0ustar jenkinsjenkins00000000000000{ "reserve": null } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedip-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fixed-ips/fixedip-post-req.json.tp0000664000567000056710000000003013064447140035024 0ustar jenkinsjenkins00000000000000{ "reserve": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-shelve/0000775000567000056710000000000013064447471027723 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-unshelve.json.tpl0000664000567000056710000000003313064447140033651 0ustar jenkinsjenkins00000000000000{ "%(action)s": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-shelve-offload.json.tpl0000664000567000056710000000003313064447140034716 0ustar jenkinsjenkins00000000000000{ "%(action)s": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-shelve/os-shelve.json.tpl0000664000567000056710000000003313064447140033306 0ustar jenkinsjenkins00000000000000{ "%(action)s": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/0000775000567000056710000000000013064447471030542 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-defaults-ge0000664000567000056710000000074313064447140035152 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post0000664000567000056710000000070513064447140035213 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 9, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, 
"security_groups": 10, "server_groups": 10, "server_group_members": 10 } } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-show-get-re0000664000567000056710000000074313064447140035111 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-resp0000664000567000056710000000070613064447140035207 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 45, "server_groups": 10, "server_group_members": 10 } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-res0000664000567000056710000000074313064447140035141 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 45, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10, "id": "fake_tenant" } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-show-get-resp.js0000664000567000056710000000074313064447140035113 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-post-req.0000664000567000056710000000007313064447140035100 0ustar jenkinsjenkins00000000000000{ "quota_set": { "security_groups": 45 } } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-pos0000664000567000056710000000011613064447140035143 0ustar jenkinsjenkins00000000000000{ "quota_set": { "force": "True", "instances": 45 } } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/quotas-update-force-pos0000664000567000056710000000070613064447140035150 0ustar jenkinsjenkins00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 45, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10, "server_groups": 10, "server_group_members": 10 } } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-sets/user-quotas-update-post0000664000567000056710000000011513064447140035206 0ustar jenkinsjenkins00000000000000{ "quota_set": { "force": "True", "instances": 9 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/0000775000567000056710000000000013064447471031600 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-list-res.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-list-res.0000664000567000056710000000037713064447140035215 0ustar jenkinsjenkins00000000000000{ "networks": [ { "cidr": "10.0.0.0/29", "id": "%(id)s", "label": "test_0" }, { "cidr": "10.0.0.8/29", "id": "%(id)s", "label": "test_1" } ] } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-req.0000664000567000056710000000024613064447140035220 0ustar jenkinsjenkins00000000000000{ "network": { "label": "public", "cidr": "172.0.0.0/24", "vlan_start": 1, "num_networks": 1, "network_size": 255 } } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-res.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-tenant-networks/networks-post-res.0000664000567000056710000000015513064447140035221 0ustar jenkinsjenkins00000000000000{ "network": { "cidr": "172.0.0.0/24", "id": "%(id)s", "label": "public" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/0000775000567000056710000000000013064447471032317 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000020000000000000011205 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-0000664000567000056710000000145413064447140035074 0ustar jenkinsjenkins00000000000000{ "tenant_usage": { "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "%(uuid)s", "local_gb": 1, "memory_mb": 512, "name": "new-server-test", "started_at": "%(strtime)s", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 } ], "start": "%(strtime)s", "stop": "%(strtime)s", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-simple-tenant-usage/simple-tenant-0000664000567000056710000000052313064447140035070 0ustar jenkinsjenkins00000000000000{ "tenant_usages": [ { "start": "%(strtime)s", "stop": "%(strtime)s", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/0000775000567000056710000000000013064447471031006 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-req.json.t0000664000567000056710000000135513064447152035221 0ustar jenkinsjenkins00000000000000{ "server": { "name": "new-server-test", "imageRef": "%(image_id)s", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-0000664000567000056710000000265713064447152035221 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] }, "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(image_id)s", "links": [ { "href": "%(compute_endpoint)s/images/%(image_id)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, 
{ "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-action-rebuild-0000664000567000056710000000145513064447152035214 0ustar jenkinsjenkins00000000000000{ "rebuild": { "accessIPv4" : "%(access_ip_v4)s", "accessIPv6" : "%(access_ip_v6)s", "imageRef": "%(image_id)s", "name": "new-server-test", "metadata": { "meta_var": "meta_val" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-personality/server-post-resp.json.0000664000567000056710000000057113064447152035216 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/0000775000567000056710000000000013064447471030421 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-req.js0000664000567000056710000000010413064447140035032 0ustar jenkinsjenkins00000000000000{ "cloudpipe": { "project_id": "%(project_id)s" } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-update-req.js0000664000567000056710000000015013064447140035052 0ustar jenkinsjenkins00000000000000{ "configure_project": { "vpn_ip": "%(vpn_ip)s", "vpn_port": "%(vpn_port)s" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-create-resp.j0000664000567000056710000000004013064447140035030 0ustar jenkinsjenkins00000000000000{ "instance_id": "%(id)s" } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-cloudpipe/cloud-pipe-get-resp.json0000664000567000056710000000046113064447140035073 0ustar jenkinsjenkins00000000000000{ "cloudpipes": [ { "created_at": "%(isotime)s", "instance_id": "%(uuid)s", "internal_ip": "%(ip)s", "project_id": "%(project_id)s", "public_ip": "%(ip)s", "public_port": 22, "state": "down" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/limits/0000775000567000056710000000000013064447471027317 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/limits/v2-limit-get-resp.json.tpl0000664000567000056710000000525013064447152034175 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxSecurityGroups": 10, "maxSecurityGroupRules": 20, "maxServerGroups": 10, "maxServerGroupMembers": 10 }, "rate": [ { "limit": [ { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" }, { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "PUT" }, { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "DELETE" } ], "regex": ".*", "uri": "*" }, { "limit": [ { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "POST" } ], "regex": "^/servers", "uri": "*/servers" }, { "limit": [ { "next-available": "%(isotime)s", "remaining": 120, "unit": "MINUTE", "value": 120, "verb": "GET" } ], "regex": ".*changes-since.*", "uri": "*changes-since*" }, { "limit": [ { "next-available": "%(isotime)s", "remaining": 12, "unit": "MINUTE", "value": 12, "verb": "GET" } ], "regex": "^/os-fping", "uri": "*/os-fping" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/limits/limit-get-resp.json.tpl0000664000567000056710000000105413064447152033646 0ustar jenkinsjenkins00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": 10, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxSecurityGroups": 10, "maxSecurityGroupRules": 20, "maxServerGroups": 10, "maxServerGroupMembers": 10 }, "rate": [] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/0000775000567000056710000000000013064447471031645 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-upd0000664000567000056710000000061313064447140035133 0ustar jenkinsjenkins00000000000000{ "quota_class_set": { "cores": 50, "floating_ips": 10, "fixed_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 50, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-sho0000664000567000056710000000064713064447140035143 0ustar jenkinsjenkins00000000000000{ "quota_class_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "id": "%(set_id)s", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-quota-class-sets/quota-classes-upd0000664000567000056710000000061313064447140035133 0ustar jenkinsjenkins00000000000000{ "quota_class_set": { "instances": 50, "cores": 50, "ram": 51200, "floating_ips": 10, "fixed_ips": -1, "metadata_items": 128, "injected_files": 5, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "security_groups": 10, "security_group_rules": 20, "key_pairs": 100 } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/0000775000567000056710000000000013064447471033656 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/server-0000664000567000056710000000310413064447152035156 0ustar jenkinsjenkins00000000000000{ "server": { "updated": "%(isotime)s", "created": "%(isotime)s", "OS-EXT-AZ:availability_zone": "nova", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null } } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/servers-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-availability-zone/servers0000664000567000056710000000345513064447152035275 0ustar jenkinsjenkins00000000000000{ "servers": [ { "updated": "%(isotime)s", "created": "%(isotime)s", "OS-EXT-AZ:availability_zone": "nova", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ 
{ "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/0000775000567000056710000000000013064447471031032 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-search-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-search-res0000664000567000056710000000026313064447140035376 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-without-servers-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-without-se0000664000567000056710000000026313064447140035452 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-with-servers-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-with-serve0000664000567000056710000000100213064447140035427 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ] } ] } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-statistics0000664000567000056710000000055713064447140035542 0ustar jenkinsjenkins00000000000000{ "hypervisor_statistics": { "count": 1, "current_workload": 0, "disk_available_least": 0, "free_disk_gb": 1028, "free_ram_mb": 7680, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "vcpus": 1, "vcpus_used": 0 } }././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-uptime-res0000664000567000056710000000037213064447140035435 0ustar jenkinsjenkins00000000000000{ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": %(hypervisor_id)s, "state": "up", "status": "enabled", "uptime": " 08:32:11 up 93 
days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" } } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-list-resp.0000664000567000056710000000026313064447140035342 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "state": "up", "status": "enabled", "id": 1 } ] } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-show-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-show-resp.0000664000567000056710000000156513064447152035360 0ustar jenkinsjenkins00000000000000{ "hypervisor": { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "current_workload": 0, "disk_available_least": 0, "state": "up", "status": "enabled", "host_ip": "%(ip)s", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": %(hypervisor_id)s, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "%(host_name)s", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/hypervisors-detail-res0000664000567000056710000000175213064447152035402 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "current_workload": 0, "state": "up", "status": "enabled", "disk_available_least": 0, "host_ip": "%(ip)s", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": %(hypervisor_id)s, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "%(host_name)s", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/0000775000567000056710000000000013064447471031553 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-0000664000567000056710000000010413064447140035057 0ustar jenkinsjenkins00000000000000{ "os-getSPICEConsole": { "type": "spice-html5" } } ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console0000664000567000056710000000015213064447140035161 0ustar jenkinsjenkins00000000000000{ "console": { "type": "serial", "url": "ws://127.0.0.1:6083/?token=%(uuid)s" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-spice-console-0000664000567000056710000000020013064447140035054 0ustar jenkinsjenkins00000000000000{ "console": { "type": "spice-html5", "url": "http://127.0.0.1:6082/spice_auto.html?token=%(uuid)s" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-serial-console0000664000567000056710000000010013064447140035152 0ustar jenkinsjenkins00000000000000{ "os-getSerialConsole": { "type": "serial" } } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-po0000664000567000056710000000015713064447140035110 0ustar jenkinsjenkins00000000000000{ "console": { "type": "rdp-html5", "url": "http://127.0.0.1:6083/?token=%(uuid)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/0000775000567000056710000000000013064447471032246 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-co0000664000567000056710000000015713064447140034613 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "vnc", "type": "novnc", "url": "%(url)s" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.6/create-vnc-co0000664000567000056710000000012513064447140034606 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "vnc", "type": "novnc" } } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-po0000664000567000056710000000017013064447140035104 0ustar jenkinsjenkins00000000000000{ "console": { "type": "novnc", "url": "http://127.0.0.1:6080/vnc_auto.html?token=%(uuid)s" } } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-vnc-console-po0000664000567000056710000000007413064447140035107 0ustar jenkinsjenkins00000000000000{ "os-getVNCConsole": { "type": "novnc" } } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/get-rdp-console-po0000664000567000056710000000010013064447140035074 0ustar jenkinsjenkins00000000000000{ "os-getRDPConsole": { "type": "rdp-html5" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/0000775000567000056710000000000013064447471032250 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-console-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-co0000664000567000056710000000016013064447140034613 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "mks", "type": "webmks", "url": "%(url)s" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-console-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-remote-consoles/v2.8/create-mks-co0000664000567000056710000000012613064447140034615 0ustar jenkinsjenkins00000000000000{ "remote_console": { "protocol": "mks", "type": "webmks" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/0000775000567000056710000000000013064447471033065 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-creat0000664000567000056710000000024013064447140035212 0ustar jenkinsjenkins00000000000000{ "events": [ { "name": "%(name)s", "tag": "%(tag)s", "status": "%(status)s", "server_uuid": "%(uuid)s" } ] } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-external-events/event-creat0000664000567000056710000000031713064447140035217 0ustar jenkinsjenkins00000000000000{ "events": [ { "code": 200, "name": "%(name)s", "server_uuid": "%(uuid)s", "status": "%(status)s", "tag": "%(tag)s" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/0000775000567000056710000000000013064447471027575 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-resp.json.tpl0000664000567000056710000000131613064447140033761 0ustar jenkinsjenkins00000000000000{ "host": [ { "resource": { "cpu": 1, "disk_gb": 1028, "host": "%(host_name)s", "memory_mb": 8192, "project": "(total)" } }, { "resource": { "cpu": 0, "disk_gb": 0, 
"host": "%(host_name)s", "memory_mb": 512, "project": "(used_now)" } }, { "resource": { "cpu": 0, "disk_gb": 0, "host": "%(host_name)s", "memory_mb": 0, "project": "(used_max)" } } ] } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-req.jso0000664000567000056710000000007613064447140035136 0ustar jenkinsjenkins00000000000000{ "status": "enable", "maintenance_mode": "disable" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-shutdown.json.tpl0000664000567000056710000000010013064447140034651 0ustar jenkinsjenkins00000000000000{ "host": "%(host_name)s", "power_action": "shutdown" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-reboot.json.tpl0000664000567000056710000000007613064447140034304 0ustar jenkinsjenkins00000000000000{ "host": "%(host_name)s", "power_action": "reboot" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/hosts-list-resp.json.tpl0000664000567000056710000000123413064447140034337 0ustar jenkinsjenkins00000000000000{ "hosts": [ { "host_name": "%(host_name)s", "service": "conductor", "zone": "internal" }, { "host_name": "%(host_name)s", "service": "compute", "zone": "nova" }, { "host_name": "%(host_name)s", "service": "consoleauth", "zone": "internal" }, { "host_name": "%(host_name)s", "service": "network", "zone": "internal" }, { "host_name": "%(host_name)s", "service": "scheduler", "zone": "internal" } ] } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-put-maintenance-resp.js0000664000567000056710000000014413064447140035135 0ustar jenkinsjenkins00000000000000{ "host": "%(host_name)s", "maintenance_mode": "off_maintenance", "status": "enabled" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hosts/host-get-startup.json.tpl0000664000567000056710000000007713064447140034515 0ustar jenkinsjenkins00000000000000{ "host": "%(host_name)s", "power_action": "startup" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/0000775000567000056710000000000013064447471033654 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000021700000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-0000664000567000056710000000036113064447140035153 0ustar jenkinsjenkins00000000000000{ "rebuild": { "imageRef": "%(uuid)s", "name": "%(name)s", "adminPass": "%(pass)s", "metadata": { "meta_var": "meta_val" }, "preserve_ephemeral": %(preserve_ephemeral)s } } ././@LongLink0000000000000000000000000000022400000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-preserve-ephemeral-rebuild/server-0000664000567000056710000000264013064447152035160 0ustar jenkinsjenkins00000000000000{ 
"server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] }, "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "%(name)s", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/0000775000567000056710000000000013064447471031031 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-emp0000664000567000056710000000003313064447140035065 0ustar jenkinsjenkins00000000000000{ "floating_ips": [] } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-r0000664000567000056710000000003213064447140035034 0ustar jenkinsjenkins00000000000000{ "pool": "%(pool)s" }././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-list-res0000664000567000056710000000055013064447140035101 0ustar jenkinsjenkins00000000000000{ "floating_ips": [ { "fixed_ip": null, "id": 1, "instance_id": null, "ip": "10.10.10.1", "pool": "nova" }, { "fixed_ip": null, "id": 2, "instance_id": null, "ip": "10.10.10.2", "pool": "nova" } ] } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-n0000664000567000056710000000000013064447140035023 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-create-r0000664000567000056710000000023213064447140035036 0ustar jenkinsjenkins00000000000000{ "floating_ip": { "fixed_ip": null, "id": 1, "instance_id": null, "ip": "10.10.10.1", "pool": "nova" } } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ips/floating-ips-get-resp0000664000567000056710000000023213064447140035062 0ustar jenkinsjenkins00000000000000{ "floating_ip": { "fixed_ip": null, "id": 1, "instance_id": null, "ip": "10.10.10.1", "pool": "nova" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/0000775000567000056710000000000013064447471030632 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-resize-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-resize-post-req0000664000567000056710000000013013064447152035120 0ustar jenkinsjenkins00000000000000{ "resize": { "flavorRef": "3", "OS-DCF:diskConfig": "AUTO" } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-req.json.t0000664000567000056710000000035613064447152035045 0ustar jenkinsjenkins00000000000000{ "server" : { "OS-DCF:diskConfig": "AUTO", "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-get-resp.json.t0000664000567000056710000000303113064447152035012 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "key_name": null, "user_id": "fake" } } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-0000664000567000056710000000266013064447152035037 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "192.168.0.3", "version": 4 } ] }, "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": 
"%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-action-rebuild-0000664000567000056710000000014413064447152035032 0ustar jenkinsjenkins00000000000000{ "rebuild": { "imageRef" : "%(image_id)s", "OS-DCF:diskConfig": "AUTO" } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/list-servers-detail-get.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/list-servers-detail-ge0000664000567000056710000000340613064447152035047 0ustar jenkinsjenkins00000000000000{ "servers": [ { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "", "accessIPv6": "", "addresses": { "private": [ { "addr": "192.168.0.3", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "key_name": null, "user_id": "fake" } ] } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-post-resp.json.0000664000567000056710000000063613064447152035044 0ustar jenkinsjenkins00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/0000775000567000056710000000000013064447471031163 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-0000664000567000056710000000007313064447140035022 0ustar jenkinsjenkins00000000000000{ "os-resetState": { "state": "active" } } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-state.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-0000664000567000056710000000007313064447140035022 0ustar jenkinsjenkins00000000000000{ 'os-resetState': { 'state': 'active' } } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-inject0000664000567000056710000000004213064447140035073 0ustar jenkinsjenkins00000000000000{ "injectNetworkInfo": null } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-network.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-admin-actions/admin-actions-reset-0000664000567000056710000000003513064447140035020 0ustar jenkinsjenkins00000000000000{ "resetNetwork": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fping/0000775000567000056710000000000013064447471027540 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fping/fping-get-details-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fping/fping-get-details-resp.json.0000664000567000056710000000020513064447140034751 0ustar jenkinsjenkins00000000000000{ "server": { "alive": false, "id": "%(uuid)s", "project_id": "6f70656e737461636b20342065766572" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-fping/fping-get-resp.json.tpl0000664000567000056710000000024613064447140034053 0ustar jenkinsjenkins00000000000000{ "servers": [ { "alive": false, "id": "%(uuid)s", "project_id": "6f70656e737461636b20342065766572" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/consoles/0000775000567000056710000000000013064447471027643 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-list-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-list-get-resp.json.0000664000567000056710000000022613064447140035127 0ustar jenkinsjenkins00000000000000{ "consoles": [ { "console": { "console_type": "fake", "id": 1 } } ] }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-get-resp.json.tpl0000664000567000056710000000021613064447140034675 0ustar jenkinsjenkins00000000000000{"console": {"console_type": "fake", "port": 5999, "instance_name": "instance-00000001", "host": "fake", "password": "%(password)s", "id": 1}}nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/consoles/consoles-create-req.json.tpl0000664000567000056710000000000013064447140035166 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/0000775000567000056710000000000013064447471030467 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-req.json.tp0000664000567000056710000000043513064447152035060 0ustar jenkinsjenkins00000000000000{ "server" : { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" } } } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-get-resp.json.tp0000664000567000056710000000302313064447152035030 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "key_name": null } } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild-r0000664000567000056710000000265713064447152035064 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "%(ip)s", "version": 4 } ] }, "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(image_id)s", "links": [ { "href": "%(compute_endpoint)s/images/%(image_id)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s" } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-details-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-details-resp.js0000664000567000056710000000336213064447152035110 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", 
"OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake", "key_name": null } ] } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-action-rebuild.j0000664000567000056710000000037613064447152035051 0ustar jenkinsjenkins00000000000000{ "rebuild" : { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "imageRef" : "%(image_id)s", "name" : "new-server-test", "metadata" : { "meta_var" : "meta_val" } } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/servers-list-resp.json.0000664000567000056710000000067013064447152035050 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "name": "new-server-test" } ] } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-post-resp.json.t0000664000567000056710000000057113064447152035063 0ustar jenkinsjenkins00000000000000{ "server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-user-data/0000775000567000056710000000000013064447471030322 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-req.json.t0000664000567000056710000000036013064447152035032 0ustar jenkinsjenkins00000000000000{ "server" : { "name" : "new-server-test", "imageRef" : "%(image_id)s", "flavorRef" : "1", "metadata" : { "My Server Name" : "Apache1" }, "user_data" : "%(user_data)s" } } ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-user-data/userdata-post-resp.json.0000664000567000056710000000057113064447152035034 0ustar jenkinsjenkins00000000000000{ 
"server": { "adminPass": "%(password)s", "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ] } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/0000775000567000056710000000000013064447471027210 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/server-get-resp.json.tpl0000664000567000056710000000275213064447140033732 0ustar jenkinsjenkins00000000000000{ "server": { "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(host)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(host)s/v3/servers/%(uuid)s", "rel": "self" }, { "href": "%(host)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-pci:pci_devices": [ { "id": 1 } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-show-resp.json.tpl0000664000567000056710000000071713064447140033377 0ustar jenkinsjenkins00000000000000{ "pci_device": { "address": "0000:04:10.0", "compute_node_id": 1, "dev_id": "pci_0000_04_10_0", "dev_type": "type-VF", "extra_info": { "key1": "value1", "key2": "value2" }, "id": 1, "server_uuid": "69ba1044-0766-4ec0-b60d-09595de034a1", "label": "label_8086_1520", "product_id": "1520", "status": "available", "vendor_id": "8086" } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-show-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-show-resp.json0000664000567000056710000000220713064447140035170 0ustar jenkinsjenkins00000000000000{ "hypervisor": { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "current_workload": 0, "state": "up", "status": "enabled", "disk_available_least": 0, "host_ip": "%(ip)s", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "os-pci:pci_stats": [ { "count": 5, "key1": "value1", "keya": "valuea", "product_id": "1520", "vendor_id": "8086", "numa_node": 1 } ], "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } } ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/hypervisors-pci-detail-resp.js0000664000567000056710000000244413064447140035120 0ustar jenkinsjenkins00000000000000{ "hypervisors": [ { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", 
\"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "state": "up", "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "%(ip)s", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "os-pci:pci_stats": [ { "count": 5, "key1": "value1", "keya": "valuea", "product_id": "1520", "vendor_id": "8086", "numa_node": 1 } ], "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": 2, "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-index-resp.json.tpl0000664000567000056710000000071513064447140033524 0ustar jenkinsjenkins00000000000000{ "pci_devices": [ { "address": "0000:04:10.0", "compute_node_id": 1, "id": 1, "product_id": "1520", "status": "available", "vendor_id": "8086" }, { "address": "0000:04:10.1", "compute_node_id": 1, "id": 2, "product_id": "1520", "status": "available", "vendor_id": "8086" } ] }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/pci-detail-resp.json.tpl0000664000567000056710000000203213064447140033651 0ustar jenkinsjenkins00000000000000{ "pci_devices": [ { "address": "0000:04:10.0", "compute_node_id": 1, "dev_id": "pci_0000_04_10_0", "dev_type": "type-VF", "extra_info": { "key1": "value1", "key2": "value2" }, "id": 1, "server_uuid": "69ba1044-0766-4ec0-b60d-09595de034a1", "label": "label_8086_1520", "product_id": "1520", "status": "available", "vendor_id": "8086" }, { "address": "0000:04:10.1", "compute_node_id": 1, "dev_id": "pci_0000_04_10_1", "dev_type": "type-VF", "extra_info": { "key3": "value3", "key4": "value4" }, "id": 2, "server_uuid": "d5b446a6-a1b4-4d01-b4f0-eac37b3a62fc", "label": "label_8086_1520", "product_id": "1520", "status": "available", "vendor_id": "8086" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-pci/servers-detail-resp.json.tpl0000664000567000056710000000333413064447140034575 0ustar jenkinsjenkins00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(host)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(host)s/v3/servers/%(uuid)s", "rel": "self" }, { "href": "%(host)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-pci:pci_devices": [ { "id": 1 } ], "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/0000775000567000056710000000000013064447471032262 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000017400000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-host-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disasso0000664000567000056710000000004213064447140035326 0ustar 
jenkinsjenkins00000000000000{ "disassociate_host": null } ././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-project-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disasso0000664000567000056710000000004513064447140035331 0ustar jenkinsjenkins00000000000000{ "disassociate_project": null } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disassociate-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-disasso0000664000567000056710000000003513064447140035330 0ustar jenkinsjenkins00000000000000{ "disassociate": null } ././@LongLink0000000000000000000000000000017100000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-associate-host-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-networks-associate/network-associa0000664000567000056710000000004513064447140035306 0ustar jenkinsjenkins00000000000000{ "associate_host": "%(host)s" } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrations/0000775000567000056710000000000013064447471030611 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrations/v2.23/0000775000567000056710000000000013064447471031363 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrations/v2.23/migrations-get.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrations/v2.23/migrations-get.js0000664000567000056710000000506513064447140034651 0ustar jenkinsjenkins00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "%(instance_1)s", "links": [ { "href": "%(host)s/v2.1/6f70656e737461636b20342065766572/servers/%(instance_1)s/migrations/1", "rel": "self" }, { "href": "%(host)s/6f70656e737461636b20342065766572/servers/%(instance_1)s/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "migration_type": "live-migration", "status": "running", "updated_at": "2016-01-29T13:42:02.000000" }, { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 2, "instance_uuid": "%(instance_1)s", "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "migration_type": "live-migration", "status": "error", "updated_at": "2016-01-29T13:42:02.000000" }, { "created_at": "2016-01-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "%(instance_2)s", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "migration_type": "resize", "status": "error", "updated_at": "2016-01-22T13:42:02.000000" }, { "created_at": "2016-01-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": 
"%(instance_2)s", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "migration_type": "resize", "status": "migrating", "updated_at": "2016-01-22T13:42:02.000000" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-migrations/migrations-get.json.tpl0000664000567000056710000000201013064447140035215 0ustar jenkinsjenkins00000000000000{ "migrations": [ { "created_at": "2012-10-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1234, "instance_uuid": "instance_id_123", "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "Done", "updated_at": "2012-10-29T13:42:02.000000" }, { "created_at": "2013-10-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 5678, "instance_uuid": "instance_id_456", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "Done", "updated_at": "2013-10-22T13:42:02.000000" } ] }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/0000775000567000056710000000000013064447471031430 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000020600000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-cr0000664000567000056710000000013013064447140034733 0ustar jenkinsjenkins00000000000000{ "dns_entry": { "ip": "%(ip)s", "dns_type": "%(dns_type)s" } } ././@LongLink0000000000000000000000000000020700000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-cr0000664000567000056710000000024413064447140034741 0ustar jenkinsjenkins00000000000000{ "dns_entry": { "domain": "%(domain)s", "id": null, "ip": "%(ip)s", "name": "%(name)s", "type": "%(dns_type)s" } } ././@LongLink0000000000000000000000000000020000000000000011205 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-cr0000664000567000056710000000013713064447140034742 0ustar jenkinsjenkins00000000000000{ "domain_entry": { "scope": "%(scope)s", "project": "%(project)s" } } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-li0000664000567000056710000000031013064447140034733 0ustar jenkinsjenkins00000000000000{ "domain_entries": [ { "availability_zone": null, "domain": "%(domain)s", "project": "%(project)s", "scope": "%(scope)s" } ] } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-en0000664000567000056710000000030413064447140034734 0ustar jenkinsjenkins00000000000000{ "dns_entries": [ { "domain": "%(domain)s", "id": null, "ip": "%(ip)s", "name": "%(name)s", "type": null } ] } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-en0000664000567000056710000000023213064447140034734 0ustar jenkinsjenkins00000000000000{ "dns_entry": { "domain": "%(domain)s", "id": null, "ip": "%(ip)s", "name": "%(name)s", "type": null } } ././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-dns/floating-ip-dns-cr0000664000567000056710000000024213064447140034737 0ustar jenkinsjenkins00000000000000{ "domain_entry": { "availability_zone": null, "domain": "%(domain)s", "project": "%(project)s", "scope": "%(scope)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-status/0000775000567000056710000000000013064447471031556 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-status/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-status/server-get-resp.js0000664000567000056710000000321313064447152035141 0ustar jenkinsjenkins00000000000000{ "server": { "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-status/servers-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-extended-status/servers-detail-res0000664000567000056710000000355613064447152035226 0ustar jenkinsjenkins00000000000000{ "servers": [ { "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-EXT-STS:power_state": 1, "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "updated": 
"%(isotime)s", "created": "%(isotime)s", "addresses": { "private": [ { "addr": "%(ip)s", "version": 4, "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed" } ] }, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "fake", "key_name": null }] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavors/0000775000567000056710000000000013064447471027472 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavors/flavors-detail-resp.json.tpl0000664000567000056710000000613713064447152035051 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "2", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/2", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "os-flavor-access:is_public": true, "ram": 2048, "swap": "", "vcpus": 1 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "3", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/3", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "os-flavor-access:is_public": true, "ram": 4096, "swap": "", "vcpus": 2 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "4", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/4", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "os-flavor-access:is_public": true, "ram": 8192, "swap": "", "vcpus": 4 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "id": "5", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/5", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "os-flavor-access:is_public": true, "ram": 16384, "swap": "", "vcpus": 8 } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavors/flavors-list-resp.json.tpl0000664000567000056710000000370313064447140034553 0ustar jenkinsjenkins00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny" }, { "id": "2", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/2", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/2", "rel": "bookmark" } ], "name": "m1.small" }, { "id": "3", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/3", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/3", "rel": 
"bookmark" } ], "name": "m1.medium" }, { "id": "4", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/4", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/4", "rel": "bookmark" } ], "name": "m1.large" }, { "id": "5", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/5", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/flavors/flavor-get-resp.json.tpl0000664000567000056710000000105213064447152034172 0ustar jenkinsjenkins00000000000000{ "flavor": { "disk": 1, "id": "1", "links": [ { "href": "%(versioned_compute_endpoint)s/flavors/1", "rel": "self" }, { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "os-flavor-access:is_public": true, "ram": 512, "vcpus": 1, "OS-FLV-DISABLED:disabled": false, "OS-FLV-EXT-DATA:ephemeral": 0, "swap": "" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers-sort/0000775000567000056710000000000013064447471030474 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers-sort/server-sort-keys-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/servers-sort/server-sort-keys-list-re0000664000567000056710000000067013064447140035234 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "name": "new-server-test" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/0000775000567000056710000000000013064447471031476 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/0000775000567000056710000000000013064447471032250 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-index.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-in0000664000567000056710000000130713064447152034750 0ustar jenkinsjenkins00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "%(server_uuid_1)s", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 120000, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 230000, "updated_at": "2016-01-29T13:42:02.000000" } ] } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-get.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/v2.23/migrations-ge0000664000567000056710000000116013064447152034732 0ustar jenkinsjenkins00000000000000{ "migration": { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "%(server_uuid)s", "source_compute": "compute1", "source_node": "node1", "status": "running", 
"memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 120000, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 230000, "updated_at": "2016-01-29T13:42:02.000000" } } ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server0000664000567000056710000000020613064447140035301 0ustar jenkinsjenkins00000000000000{ "os-migrateLive": { "host": "%(hostname)s", "block_migration": false, "disk_over_commit": false } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json0000664000567000056710000000003713064447140035350 0ustar jenkinsjenkins00000000000000{ "force_complete": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/0000775000567000056710000000000013064447471031422 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-resume.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-resume.json.0000664000567000056710000000002713064447140035167 0ustar jenkinsjenkins00000000000000{ "resume": null } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-suspend.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-suspend-server/server-suspend.json0000664000567000056710000000003013064447140035264 0ustar jenkinsjenkins00000000000000{ "suspend": null } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-pools/0000775000567000056710000000000013064447471032000 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-floating-ip-pools/floatingippools-0000664000567000056710000000021613064447140035201 0ustar jenkinsjenkins00000000000000{ "floating_ip_pools": [ { "name": "%(pool1)s" }, { "name": "%(pool2)s" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/0000775000567000056710000000000013064447471032625 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/server-get-r0000664000567000056710000000237213064447152035072 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": 
"%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000016600000000000011220 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-details-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-deta0000664000567000056710000000266413064447152035160 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": {}, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } ] } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-list-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-hide-server-addresses/servers-list0000664000567000056710000000067013064447140035206 0ustar jenkinsjenkins00000000000000{ "servers": [ { "id": "%(id)s", "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(id)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "name": "new-server-test" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/0000775000567000056710000000000013064447471027263 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-resp.json.0000664000567000056710000000030113064447140035011 0ustar jenkinsjenkins00000000000000{ "metadata": { "Label": "UpdatedImage", "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "False", "ramdisk_id": "nokernel" } }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-get-resp.json.t0000664000567000056710000000024313064447140034774 0ustar jenkinsjenkins00000000000000{ "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" } }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-get-resp.json.tpl0000664000567000056710000000215013064447140033551 0ustar jenkinsjenkins00000000000000{ "image": { 
"OS-DCF:diskConfig": "AUTO", "created": "2011-01-01T01:02:03Z", "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "%(versioned_compute_endpoint)s/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "%(compute_endpoint)s/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage7", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-resp.json.t0000664000567000056710000000013213064447140035022 0ustar jenkinsjenkins00000000000000{ "metadata": { "Label": "Changed", "auto_disk_config": "True" } }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-post-req.json.t0000664000567000056710000000013213064447140035015 0ustar jenkinsjenkins00000000000000{ "metadata": { "kernel_id": "False", "Label": "UpdatedImage" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl0000664000567000056710000001223713064447140034714 0ustar jenkinsjenkins00000000000000{ "images": [ { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "%(versioned_compute_endpoint)s/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "%(compute_endpoint)s/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage7" }, { "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "links": [ { "href": "%(versioned_compute_endpoint)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "self" }, { "href": "%(compute_endpoint)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "a2459075-d96c-40d5-893e-577ff92e721c", "links": [ { "href": "%(versioned_compute_endpoint)s/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "self" }, { "href": "%(compute_endpoint)s/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "a440c04b-79fa-479c-bed1-0b816eaec379", "links": [ { "href": "%(versioned_compute_endpoint)s/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "self" }, { "href": "%(compute_endpoint)s/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": 
"alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage6" }, { "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77", "links": [ { "href": "%(versioned_compute_endpoint)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "self" }, { "href": "%(compute_endpoint)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "cedef40a-ed67-4d10-800e-17455edce175", "links": [ { "href": "%(versioned_compute_endpoint)s/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "self" }, { "href": "%(compute_endpoint)s/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "links": [ { "href": "%(versioned_compute_endpoint)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "self" }, { "href": "%(compute_endpoint)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" } ] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-get.json.tpl0000664000567000056710000000006713064447140034321 0ustar jenkinsjenkins00000000000000{ "meta": { "kernel_id": "nokernel" } }././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.t0000664000567000056710000002003213064447140035022 0ustar jenkinsjenkins00000000000000{ "images": [ { "OS-DCF:diskConfig": "AUTO", "created": "2011-01-01T01:02:03Z", "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "%(versioned_compute_endpoint)s/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "%(compute_endpoint)s/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage7", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "created": "2011-01-01T01:02:03Z", "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "links": [ { "href": "%(versioned_compute_endpoint)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "self" }, { "href": "%(compute_endpoint)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "created": 
"2011-01-01T01:02:03Z", "id": "a2459075-d96c-40d5-893e-577ff92e721c", "links": [ { "href": "%(versioned_compute_endpoint)s/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "self" }, { "href": "%(compute_endpoint)s/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-DCF:diskConfig": "MANUAL", "created": "2011-01-01T01:02:03Z", "id": "a440c04b-79fa-479c-bed1-0b816eaec379", "links": [ { "href": "%(versioned_compute_endpoint)s/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "self" }, { "href": "%(compute_endpoint)s/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "False", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage6", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "created": "2011-01-01T01:02:03Z", "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77", "links": [ { "href": "%(versioned_compute_endpoint)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "self" }, { "href": "%(compute_endpoint)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ramdisk_id": null }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "created": "2011-01-01T01:02:03Z", "id": "cedef40a-ed67-4d10-800e-17455edce175", "links": [ { "href": "%(versioned_compute_endpoint)s/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "self" }, { "href": "%(compute_endpoint)s/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "created": "2011-01-01T01:02:03Z", "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "links": [ { "href": "%(versioned_compute_endpoint)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "self" }, { "href": "%(compute_endpoint)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "OS-EXT-IMG-SIZE:size": %(int)s, "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" } ] } 
././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-resp.json.t0000664000567000056710000000007313064447140034762 0ustar jenkinsjenkins00000000000000{ "meta": { "auto_disk_config": "False" } }././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-req.json.tp0000664000567000056710000000013313064447140035021 0ustar jenkinsjenkins00000000000000{ "metadata": { "auto_disk_config": "True", "Label": "Changed" } } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/images/image-meta-key-put-req.json.tp0000664000567000056710000000007413064447140034761 0ustar jenkinsjenkins00000000000000{ "meta": { "auto_disk_config": "False" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-usage/0000775000567000056710000000000013064447471031045 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-usage/server-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-usage/server-get-resp.json.0000664000567000056710000000315613064447152035051 0ustar jenkinsjenkins00000000000000{ "server": { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "OS-SRV-USG:launched_at": "%(strtime)s", "OS-SRV-USG:terminated_at": null, "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "%(isotime)s", "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(uuid)s", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "%(isotime)s", "user_id": "fake" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-usage/servers-detail-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-server-usage/servers-detail-resp.j0000664000567000056710000000352213064447152035116 0ustar jenkinsjenkins00000000000000{ "servers": [ { "accessIPv4": "%(access_ip_v4)s", "accessIPv6": "%(access_ip_v6)s", "status": "ACTIVE", "created": "%(isotime)s", "OS-SRV-USG:launched_at": "%(strtime)s", "user_id": "fake", "addresses": { "private": [ { "addr": "%(ip)s", "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "key_name": null, "links": [ { "href": 
"%(versioned_compute_endpoint)s/servers/%(uuid)s", "rel": "self" }, { "href": "%(compute_endpoint)s/servers/%(id)s", "rel": "bookmark" } ], "updated": "%(isotime)s", "name": "new-server-test", "image": { "id": "%(uuid)s", "links": [ { "href": "%(compute_endpoint)s/images/%(uuid)s", "rel": "bookmark" } ] }, "id": "%(uuid)s", "OS-SRV-USG:terminated_at": null, "tenant_id": "6f70656e737461636b20342065766572", "progress": 0, "flavor": { "id": "1", "links": [ { "href": "%(compute_endpoint)s/flavors/1", "rel": "bookmark" } ] }, "hostId": "%(hostid)s", "metadata": { "My Server Name": "Apache1" } }] } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/0000775000567000056710000000000013064447471030546 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-add-host-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-add-host-post0000664000567000056710000000007413064447140035075 0ustar jenkinsjenkins00000000000000{ "add_host": { "host": "%(host_name)s" } } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-metadata-pos0000664000567000056710000000054213064447140035151 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova", "key": "value" }, "name": "name", "updated_at": %(strtime)s } } ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-add-host-pos0000664000567000056710000000054713064447140035101 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "hosts": [ "%(compute_host)s" ], "id": 1, "metadata": { "availability_zone": "nova" }, "name": "name", "updated_at": null } } ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-remove-host-p0000664000567000056710000000007713064447140035117 0ustar jenkinsjenkins00000000000000{ "remove_host": { "host": "%(host_name)s" } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-get-resp.jso0000664000567000056710000000047713064447140035121 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova" }, "name": "name", "updated_at": null } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 
00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-list-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-list-get-res0000664000567000056710000000060013064447152035107 0ustar jenkinsjenkins00000000000000{ "aggregates": [ { "availability_zone": "nova", "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova" }, "name": "name", "updated_at": null } ] } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-r0000664000567000056710000000051513064447140035113 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova2", "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova2" }, "name": "newname", "updated_at": "%(strtime)s" } } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-resp.jso0000664000567000056710000000036013064447140035133 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "id": %(aggregate_id)s, "name": "name", "updated_at": null } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-post-req.json0000664000567000056710000000013513064447140035127 0ustar jenkinsjenkins00000000000000{ "aggregate": { "name": "name", "availability_zone": "nova" } } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-metadata-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-metadata-post0000664000567000056710000000021313064447140035145 0ustar jenkinsjenkins00000000000000{ "set_metadata": { "metadata": { "key": "value" } } } ././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregate-update-post-r0000664000567000056710000000014113064447140035106 0ustar jenkinsjenkins00000000000000{ "aggregate": { "name": "newname", "availability_zone": "nova2" } } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/os-aggregates/aggregates-remove-host-0000664000567000056710000000047713064447140035126 0ustar jenkinsjenkins00000000000000{ "aggregate": { "availability_zone": "nova", "created_at": "%(strtime)s", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": 
{ "availability_zone": "nova" }, "name": "name", "updated_at": null } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/versions/0000775000567000056710000000000013064447471027666 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl0000664000567000056710000000132513064447140034745 0ustar jenkinsjenkins00000000000000{ "versions": [ { "id": "v2.0", "links": [ { "href": "%(host)s/v2/", "rel": "self" } ], "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z" }, { "id": "v2.1", "links": [ { "href": "%(host)s/v2.1/", "rel": "self" } ], "status": "CURRENT", "version": "%(max_api_version)s", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } ] } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tp0000664000567000056710000000123013064447140035007 0ustar jenkinsjenkins00000000000000{ "version": { "id": "v2.1", "links": [ { "href": "%(host)s/v2.1/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2.1" } ], "status": "CURRENT", "version": "%(max_api_version)s", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } } nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/versions/v2-version-get-resp.json.tpl0000664000567000056710000000117713064447140035114 0ustar jenkinsjenkins00000000000000{ "version": { "id": "v2.0", "links": [ { "href": "%(host)s/v2/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2" } ], "min_version": "", "status": "SUPPORTED", "updated": "2011-01-21T11:33:21Z", "version": "" } }nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/0000775000567000056710000000000013064447471031102 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-req.j0000664000567000056710000000006313064447140035116 0ustar jenkinsjenkins00000000000000{ "meta": { "foo": "%(value)s" } } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-r0000664000567000056710000000006713064447140035112 0ustar jenkinsjenkins00000000000000{ "metadata": { "foo": "%(value)s" } } ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-resp.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-resp.0000664000567000056710000000006313064447140035126 0ustar jenkinsjenkins00000000000000{ "meta": { "foo": "%(value)s" } } 
././@LongLink0000000000000000000000000000016000000000000011212 Lustar 00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-req.json.tplnova-13.1.4/nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-r0000664000567000056710000000006713064447140035112 0ustar jenkinsjenkins00000000000000{ "metadata": { "foo": "%(value)s" } } nova-13.1.4/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py0000664000567000056710000000601513064447152030254 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FakeNode(object): def __init__(self, uuid='058d27fa-241b-445a-a386-08c04f96db43'): self.uuid = uuid self.provision_state = 'active' self.properties = {'cpus': '2', 'memory_mb': '1024', 'local_gb': '10'} self.instance_uuid = '1ea4e53e-149a-4f02-9515-590c9fb2315a' class NodeManager(object): def list(self, detail=False): return [FakeNode(), FakeNode('e2025409-f3ce-4d6a-9788-c565cf3b1b1c')] def get(self, id): return FakeNode(id) def list_ports(self, id): return [] class fake_client(object): node = NodeManager() class BareMetalNodesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-baremetal-nodes" def _get_flags(self): f = super(BareMetalNodesSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.baremetal_nodes.Baremetal_nodes') return f @mock.patch("nova.api.openstack.compute.baremetal_nodes" "._get_ironic_client") @mock.patch("nova.api.openstack.compute.legacy_v2.contrib.baremetal_nodes" "._get_ironic_client") def test_baremetal_nodes_list(self, mock_get_irc, v2_1_mock_get_irc): mock_get_irc.return_value = fake_client() v2_1_mock_get_irc.return_value = fake_client() response = self._do_get('os-baremetal-nodes') self._verify_response('baremetal-node-list-resp', {}, response, 200) @mock.patch("nova.api.openstack.compute.baremetal_nodes" "._get_ironic_client") @mock.patch("nova.api.openstack.compute.legacy_v2.contrib.baremetal_nodes" "._get_ironic_client") def test_baremetal_nodes_get(self, mock_get_irc, v2_1_mock_get_irc): mock_get_irc.return_value = fake_client() v2_1_mock_get_irc.return_value = fake_client() response = self._do_get('os-baremetal-nodes/' '058d27fa-241b-445a-a386-08c04f96db43') self._verify_response('baremetal-node-get-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_user_data.py0000664000567000056710000000337113064447152027101 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class UserDataJsonTest(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-user-data" def _get_flags(self): f = super(UserDataJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.user_data.User_data') return f def test_user_data_post(self): user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n' user_data = base64.b64encode(user_data_contents) subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), 'user_data': user_data } response = self._do_post('servers', 'userdata-post-req', subs) self._verify_response('userdata-post-resp', subs, response, 202) nova-13.1.4/nova/tests/functional/api_sample_tests/test_lock_server.py0000664000567000056710000000404013064447152027442 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class LockServerSamplesJsonTest(test_servers.ServersSampleBase): extension_name = "os-lock-server" def _get_flags(self): f = super(LockServerSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.admin_actions.Admin_actions') return f def setUp(self): """setUp Method for LockServer api samples extension This method creates the server that will be used in each tests """ super(LockServerSamplesJsonTest, self).setUp() self.uuid = self._post_server() def test_post_lock_server(self): # Get api samples to lock server request. response = self._do_post('servers/%s/action' % self.uuid, 'lock-server', {}) self.assertEqual(202, response.status_code) def test_post_unlock_server(self): # Get api samples to unlock server request. 
self.test_post_lock_server() response = self._do_post('servers/%s/action' % self.uuid, 'unlock-server', {}) self.assertEqual(202, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_tenant_networks.py0000664000567000056710000000533013064447152030354 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_serialization import jsonutils from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('enable_network_quota', 'nova.api.openstack.compute.legacy_v2.contrib.' 'os_tenant_networks') CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class TenantNetworksJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-tenant-networks" def _get_flags(self): f = super(TenantNetworksJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.os_tenant_networks.Os_tenant_networks') return f def setUp(self): super(TenantNetworksJsonTests, self).setUp() CONF.set_override("enable_network_quota", True) def fake(*args, **kwargs): pass self.stub_out("nova.quota.QUOTAS.reserve", fake) self.stub_out("nova.quota.QUOTAS.commit", fake) self.stub_out("nova.quota.QUOTAS.rollback", fake) self.stub_out("nova.quota.QuotaEngine.reserve", fake) self.stub_out("nova.quota.QuotaEngine.commit", fake) self.stub_out("nova.quota.QuotaEngine.rollback", fake) def test_list_networks(self): response = self._do_get('os-tenant-networks') self._verify_response('networks-list-res', {}, response, 200) def test_create_network(self): response = self._do_post('os-tenant-networks', "networks-post-req", {}) self._verify_response('networks-post-res', {}, response, 200) def test_delete_network(self): response = self._do_post('os-tenant-networks', "networks-post-req", {}) net = jsonutils.loads(response.content) response = self._do_delete('os-tenant-networks/%s' % net["network"]["id"]) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py0000664000567000056710000000530113064447152031153 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import urllib from oslo_config import cfg from oslo_utils import timeutils from nova.tests.functional.api_sample_tests import test_servers import nova.tests.functional.api_samples_test_base as astb CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class SimpleTenantUsageSampleJsonTest(test_servers.ServersSampleBase): extension_name = "os-simple-tenant-usage" def _get_flags(self): f = super(SimpleTenantUsageSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.simple_tenant_usage.' 'Simple_tenant_usage') return f def setUp(self): """setUp method for simple tenant usage.""" super(SimpleTenantUsageSampleJsonTest, self).setUp() started = timeutils.utcnow() now = started + datetime.timedelta(hours=1) timeutils.set_time_override(started) self._post_server() timeutils.set_time_override(now) self.query = { 'start': str(started), 'end': str(now) } def tearDown(self): """tearDown method for simple tenant usage.""" super(SimpleTenantUsageSampleJsonTest, self).tearDown() timeutils.clear_time_override() def test_get_tenants_usage(self): # Get api sample to get all tenants usage request. response = self._do_get('os-simple-tenant-usage?%s' % ( urllib.urlencode(self.query))) self._verify_response('simple-tenant-usage-get', {}, response, 200) def test_get_tenant_usage_details(self): # Get api sample to get specific tenant usage request. tenant_id = astb.PROJECT_ID response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id, urllib.urlencode(self.query))) self._verify_response('simple-tenant-usage-get-specific', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_hypervisors.py0000664000567000056710000001443013064447152027525 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from nova.cells import utils as cells_utils from nova import objects from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-hypervisors" def _get_flags(self): f = super(HypervisorsSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.hypervisors.Hypervisors') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_hypervisors.' 'Extended_hypervisors') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.hypervisor_status.' 
'Hypervisor_status') return f def test_hypervisors_list(self): response = self._do_get('os-hypervisors') self._verify_response('hypervisors-list-resp', {}, response, 200) def test_hypervisors_search(self): response = self._do_get('os-hypervisors/fake/search') self._verify_response('hypervisors-search-resp', {}, response, 200) def test_hypervisors_without_servers(self): response = self._do_get('os-hypervisors/fake/servers') self._verify_response('hypervisors-without-servers-resp', {}, response, 200) @mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host") def test_hypervisors_with_servers(self, mock_instance_get): instance = [ { "deleted": None, "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "deleted": None, "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" }] mock_instance_get.return_value = instance response = self._do_get('os-hypervisors/fake/servers') self._verify_response('hypervisors-with-servers-resp', {}, response, 200) def test_hypervisors_detail(self): hypervisor_id = '1' subs = { 'hypervisor_id': hypervisor_id } response = self._do_get('os-hypervisors/detail') self._verify_response('hypervisors-detail-resp', subs, response, 200) def test_hypervisors_show(self): hypervisor_id = '1' subs = { 'hypervisor_id': hypervisor_id } response = self._do_get('os-hypervisors/%s' % hypervisor_id) self._verify_response('hypervisors-show-resp', subs, response, 200) def test_hypervisors_statistics(self): response = self._do_get('os-hypervisors/statistics') self._verify_response('hypervisors-statistics-resp', {}, response, 200) def test_hypervisors_uptime(self): def fake_get_host_uptime(self, context, hyp): return (" 08:32:11 up 93 days, 18:25, 12 users, load average:" " 0.20, 0.12, 0.14") self.stub_out('nova.compute.api.HostAPI.get_host_uptime', fake_get_host_uptime) hypervisor_id = '1' response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id) subs = { 'hypervisor_id': hypervisor_id, } self._verify_response('hypervisors-uptime-resp', subs, response, 200) @mock.patch("nova.servicegroup.API.service_is_up", return_value=True) class HypervisorsCellsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-hypervisors" def _get_flags(self): f = super(HypervisorsCellsSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.hypervisors.Hypervisors') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.hypervisor_status.' 
'Hypervisor_status') return f def setUp(self): self.flags(enable=True, cell_type='api', group='cells') super(HypervisorsCellsSampleJsonTests, self).setUp() def test_hypervisor_uptime(self, mocks): fake_hypervisor = objects.ComputeNode(id=1, host='fake-mini', hypervisor_hostname='fake-mini') def fake_get_host_uptime(self, context, hyp): return (" 08:32:11 up 93 days, 18:25, 12 users, load average:" " 0.20, 0.12, 0.14") def fake_compute_node_get(self, context, hyp): return fake_hypervisor def fake_service_get_by_compute_host(self, context, host): return cells_utils.ServiceProxy( objects.Service(id=1, host='fake-mini', disabled=False, disabled_reason=None), 'cell1') self.stub_out( 'nova.compute.cells_api.HostAPI.compute_node_get', fake_compute_node_get) self.stub_out( 'nova.compute.cells_api.HostAPI.service_get_by_compute_host', fake_service_get_by_compute_host) self.stub_out( 'nova.compute.cells_api.HostAPI.get_host_uptime', fake_get_host_uptime) hypervisor_id = fake_hypervisor['id'] response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id) subs = {'hypervisor_id': str(hypervisor_id)} self._verify_response('hypervisors-uptime-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py0000664000567000056710000000576713064447152030677 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = 'flavor-extra-specs' def _get_flags(self): f = super(FlavorExtraSpecsSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavorextraspecs.' 
'Flavorextraspecs') return f def _flavor_extra_specs_create(self): subs = {'value1': 'value1', 'value2': 'value2' } response = self._do_post('flavors/1/os-extra_specs', 'flavor-extra-specs-create-req', subs) self._verify_response('flavor-extra-specs-create-resp', subs, response, 200) def test_flavor_extra_specs_get(self): subs = {'value1': 'value1'} self._flavor_extra_specs_create() response = self._do_get('flavors/1/os-extra_specs/key1') self._verify_response('flavor-extra-specs-get-resp', subs, response, 200) def test_flavor_extra_specs_list(self): subs = {'value1': 'value1', 'value2': 'value2' } self._flavor_extra_specs_create() response = self._do_get('flavors/1/os-extra_specs') self._verify_response('flavor-extra-specs-list-resp', subs, response, 200) def test_flavor_extra_specs_create(self): self._flavor_extra_specs_create() def test_flavor_extra_specs_update(self): subs = {'value1': 'new_value1'} self._flavor_extra_specs_create() response = self._do_put('flavors/1/os-extra_specs/key1', 'flavor-extra-specs-update-req', subs) self._verify_response('flavor-extra-specs-update-resp', subs, response, 200) def test_flavor_extra_specs_delete(self): self._flavor_extra_specs_create() response = self._do_delete('flavors/1/os-extra_specs/key1') self.assertEqual(200, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_server_diagnostics.py0000664000567000056710000000302713064447152031025 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServerDiagnosticsSamplesJsonTest(test_servers.ServersSampleBase): extension_name = "os-server-diagnostics" def _get_flags(self): f = super(ServerDiagnosticsSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.server_diagnostics.' 'Server_diagnostics') return f def test_server_diagnostics_get(self): uuid = self._post_server() response = self._do_get('servers/%s/diagnostics' % uuid) self._verify_response('server-diagnostics-get-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_volumes.py0000664000567000056710000003237713064447152026634 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import datetime from nova import context from nova import objects from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class SnapshotsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-volumes" create_subs = { 'snapshot_name': 'snap-001', 'description': 'Daily backup', 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c' } def _get_flags(self): f = super(SnapshotsSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.volumes.Volumes') return f def setUp(self): super(SnapshotsSampleJsonTests, self).setUp() self.stub_out("nova.volume.cinder.API.get_all_snapshots", fakes.stub_snapshot_get_all) self.stub_out("nova.volume.cinder.API.get_snapshot", fakes.stub_snapshot_get) def _create_snapshot(self): self.stub_out("nova.volume.cinder.API.create_snapshot", fakes.stub_snapshot_create) response = self._do_post("os-snapshots", "snapshot-create-req", self.create_subs) return response def test_snapshots_create(self): response = self._create_snapshot() self._verify_response("snapshot-create-resp", self.create_subs, response, 200) def test_snapshots_delete(self): self.stub_out("nova.volume.cinder.API.delete_snapshot", fakes.stub_snapshot_delete) self._create_snapshot() response = self._do_delete('os-snapshots/100') self.assertEqual(202, response.status_code) self.assertEqual('', response.content) def test_snapshots_detail(self): response = self._do_get('os-snapshots/detail') self._verify_response('snapshots-detail-resp', {}, response, 200) def test_snapshots_list(self): response = self._do_get('os-snapshots') self._verify_response('snapshots-list-resp', {}, response, 200) def test_snapshots_show(self): response = self._do_get('os-snapshots/100') subs = { 'snapshot_name': 'Default name', 'description': 'Default description' } self._verify_response('snapshots-show-resp', subs, response, 200) class VolumesSampleJsonTest(test_servers.ServersSampleBase): extension_name = "os-volumes" def _get_flags(self): f = super(VolumesSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.volumes.Volumes') return f def _get_volume_id(self): return 'a26887c6-c47b-4654-abb5-dfadf7d3f803' def _stub_volume(self, id, displayname="Volume Name", displaydesc="Volume Description", size=100): volume = { 'id': id, 'size': size, 'availability_zone': 'zone1:host1', 'status': 'in-use', 'attach_status': 'attached', 'name': 'vol name', 'display_name': displayname, 'display_description': displaydesc, 'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55), 'snapshot_id': None, 'volume_type_id': 'fakevoltype', 'volume_metadata': [], 'volume_type': {'name': 'Backup'}, 'multiattach': False, 'attachments': {'3912f2b4-c5ba-4aec-9165-872876fe202e': {'mountpoint': '/', 'attachment_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803' } } } return volume def _stub_volume_get(self, context, volume_id): return self._stub_volume(volume_id) def 
_stub_volume_delete(self, context, *args, **param): pass def _stub_volume_get_all(self, context, search_opts=None): id = self._get_volume_id() return [self._stub_volume(id)] def _stub_volume_create(self, context, size, name, description, snapshot, **param): id = self._get_volume_id() return self._stub_volume(id) def setUp(self): super(VolumesSampleJsonTest, self).setUp() fakes.stub_out_networking(self) fakes.stub_out_rate_limiting(self.stubs) self.stub_out("nova.volume.cinder.API.delete", self._stub_volume_delete) self.stub_out("nova.volume.cinder.API.get", self._stub_volume_get) self.stub_out("nova.volume.cinder.API.get_all", self._stub_volume_get_all) def _post_volume(self): subs_req = { 'volume_name': "Volume Name", 'volume_desc': "Volume Description", } self.stub_out("nova.volume.cinder.API.create", self._stub_volume_create) response = self._do_post('os-volumes', 'os-volumes-post-req', subs_req) self._verify_response('os-volumes-post-resp', subs_req, response, 200) def test_volumes_show(self): subs = { 'volume_name': "Volume Name", 'volume_desc': "Volume Description", } vol_id = self._get_volume_id() response = self._do_get('os-volumes/%s' % vol_id) self._verify_response('os-volumes-get-resp', subs, response, 200) def test_volumes_index(self): subs = { 'volume_name': "Volume Name", 'volume_desc': "Volume Description", } response = self._do_get('os-volumes') self._verify_response('os-volumes-index-resp', subs, response, 200) def test_volumes_detail(self): # For now, index and detail are the same. # See the volumes api subs = { 'volume_name': "Volume Name", 'volume_desc': "Volume Description", } response = self._do_get('os-volumes/detail') self._verify_response('os-volumes-detail-resp', subs, response, 200) def test_volumes_create(self): self._post_volume() def test_volumes_delete(self): self._post_volume() vol_id = self._get_volume_id() response = self._do_delete('os-volumes/%s' % vol_id) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) class VolumeAttachmentsSample(test_servers.ServersSampleBase): extension_name = "os-volumes" def _stub_db_bdms_get_all_by_instance(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803', 'instance_uuid': server_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdd'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804', 'instance_uuid': server_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdc'}) ] return bdms self.stub_out('nova.db.block_device_mapping_get_all_by_instance', fake_bdms_get_all_by_instance) def _stub_compute_api_get(self): def fake_compute_api_get(self, context, instance_id, want_objects=False, expected_attrs=None): if want_objects: return fake_instance.fake_instance_obj( context, **{'uuid': instance_id}) else: return {'uuid': instance_id} self.stub_out('nova.compute.api.API.get', fake_compute_api_get) def _get_flags(self): f = super(VolumeAttachmentsSample, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.volumes.Volumes') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 
'volume_attachment_update.Volume_attachment_update') return f def test_attach_volume_to_server(self): self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get) self.stub_out('nova.volume.cinder.API.check_attach', lambda *a, **k: None) self.stub_out('nova.volume.cinder.API.reserve_volume', lambda *a, **k: None) device_name = '/dev/vdd' bdm = objects.BlockDeviceMapping() bdm['device_name'] = device_name self.stub_out( 'nova.compute.manager.ComputeManager.reserve_block_device_name', lambda *a, **k: bdm) self.stub_out( 'nova.compute.manager.ComputeManager.attach_volume', lambda *a, **k: None) self.stub_out( 'nova.objects.BlockDeviceMapping.get_by_volume_and_instance', classmethod(lambda *a, **k: None)) volume = fakes.stub_volume_get(None, context.get_admin_context(), 'a26887c6-c47b-4654-abb5-dfadf7d3f803') subs = { 'volume_id': volume['id'], 'device': device_name } server_id = self._post_server() response = self._do_post('servers/%s/os-volume_attachments' % server_id, 'attach-volume-to-server-req', subs) self._verify_response('attach-volume-to-server-resp', subs, response, 200) def test_list_volume_attachments(self): server_id = self._post_server() self._stub_db_bdms_get_all_by_instance(server_id) response = self._do_get('servers/%s/os-volume_attachments' % server_id) self._verify_response('list-volume-attachments-resp', {}, response, 200) def test_volume_attachment_detail(self): server_id = self._post_server() attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803" self._stub_db_bdms_get_all_by_instance(server_id) self._stub_compute_api_get() response = self._do_get('servers/%s/os-volume_attachments/%s' % (server_id, attach_id)) self._verify_response('volume-attachment-detail-resp', {}, response, 200) def test_volume_attachment_delete(self): server_id = self._post_server() attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803" self._stub_db_bdms_get_all_by_instance(server_id) self._stub_compute_api_get() self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get) self.stub_out('nova.compute.api.API.detach_volume', lambda *a, **k: None) response = self._do_delete('servers/%s/os-volume_attachments/%s' % (server_id, attach_id)) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) def test_volume_attachment_update(self): self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get) subs = { 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f805' } server_id = self._post_server() attach_id = 'a26887c6-c47b-4654-abb5-dfadf7d3f803' self._stub_db_bdms_get_all_by_instance(server_id) self._stub_compute_api_get() self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get) self.stub_out('nova.compute.api.API.swap_volume', lambda *a, **k: None) response = self._do_put('servers/%s/os-volume_attachments/%s' % (server_id, attach_id), 'update-volume-req', subs) self.assertEqual(202, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_extended_status.py0000664000567000056710000000444113064447152030334 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ExtendedStatusSampleJsonTests(test_servers.ServersSampleBase): extension_name = "os-extended-status" def _get_flags(self): f = super(ExtendedStatusSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_status.' 'Extended_status') return f def test_show(self): uuid = self._post_server() response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_detail(self): uuid = self._post_server() response = self._do_get('servers/detail') subs = {} subs['id'] = uuid subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_extension_info.py0000664000567000056710000000515213064447152030160 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from nova.api.openstack import extensions as api_extensions from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') def fake_soft_extension_authorizer(extension_name, core=False): def authorize(context, action=None): return True return authorize class ExtensionInfoAllSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21): all_extensions = True @mock.patch.object(api_extensions, 'os_compute_soft_authorizer') def test_list_extensions(self, soft_auth): soft_auth.side_effect = fake_soft_extension_authorizer response = self._do_get('extensions') # The full extension list is one of the places that things are # different between the API versions and the legacy vs. new # stack. We default to the v2.1 case. 
template = 'extensions-list-resp' if self.api_major_version == 'v2': if self._legacy_v2_code: template = 'extensions-list-resp-v2' else: template = 'extensions-list-resp-v21-compatible' self._verify_response(template, {}, response, 200) class ExtensionInfoSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21): sample_dir = "extension-info" all_extensions = True @mock.patch.object(api_extensions, 'os_compute_soft_authorizer') def test_get_extensions(self, soft_auth): soft_auth.side_effect = fake_soft_extension_authorizer response = self._do_get('extensions/os-agents') # The extension details info are different between legacy v2 and v2.1 # stack. namespace link and updated date are different. So keep both # version for testing and default to v2.1 template = 'extensions-get-resp' if self._legacy_v2_code: template = 'extensions-get-resp-v2' self._verify_response(template, {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/__init__.py0000664000567000056710000000000013064447140025611 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api_sample_tests/test_cloudpipe.py0000664000567000056710000000655713064447152027127 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid as uuid_lib from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib') CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-cloudpipe" def _get_flags(self): f = super(CloudPipeSampleTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.cloudpipe.Cloudpipe') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.cloudpipe_update.Cloudpipe_update') return f def setUp(self): super(CloudPipeSampleTest, self).setUp() def get_user_data(self, project_id): """Stub method to generate user data for cloudpipe tests.""" return "VVNFUiBEQVRB\n" def network_api_get(self, context, network_uuid): """Stub to get a valid network and its information.""" return {'vpn_public_address': '127.0.0.1', 'vpn_public_port': 22} self.stub_out('nova.cloudpipe.pipelib.CloudPipe.get_encoded_zip', get_user_data) self.stub_out('nova.network.api.API.get', network_api_get) def generalize_subs(self, subs, vanilla_regexes): subs['project_id'] = '[0-9a-f-]+' return subs def test_cloud_pipe_create(self): # Get api samples of cloud pipe extension creation. 
self.flags(vpn_image_id=fake.get_valid_image_id()) subs = {'project_id': str(uuid_lib.uuid4().hex)} response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req', subs) subs['image_id'] = CONF.vpn_image_id self._verify_response('cloud-pipe-create-resp', subs, response, 200) return subs def test_cloud_pipe_list(self): # Get api samples of cloud pipe extension get request. subs = self.test_cloud_pipe_create() response = self._do_get('os-cloudpipe') subs['image_id'] = CONF.vpn_image_id self._verify_response('cloud-pipe-get-resp', subs, response, 200) def test_cloud_pipe_update(self): subs = {'vpn_ip': '192.168.1.1', 'vpn_port': '2000'} response = self._do_put('os-cloudpipe/configure-project', 'cloud-pipe-update-req', subs) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_suspend_server.py0000664000567000056710000000401013064447152030170 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class SuspendServerSamplesJsonTest(test_servers.ServersSampleBase): extension_name = "os-suspend-server" def _get_flags(self): f = super(SuspendServerSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.admin_actions.Admin_actions') return f def setUp(self): """setUp Method for SuspendServer api samples extension This method creates the server that will be used in each tests """ super(SuspendServerSamplesJsonTest, self).setUp() self.uuid = self._post_server() def test_post_suspend(self): # Get api samples to suspend server request. response = self._do_post('servers/%s/action' % self.uuid, 'server-suspend', {}) self.assertEqual(202, response.status_code) def test_post_resume(self): # Get api samples to server resume request. self.test_post_suspend() response = self._do_post('servers/%s/action' % self.uuid, 'server-resume', {}) self.assertEqual(202, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_admin_actions.py0000664000567000056710000000453613064447152027746 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AdminActionsSamplesJsonTest(test_servers.ServersSampleBase): extension_name = "os-admin-actions" def _get_flags(self): f = super(AdminActionsSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.admin_actions.Admin_actions') return f def setUp(self): """setUp Method for AdminActions api samples extension This method creates the server that will be used in each tests """ super(AdminActionsSamplesJsonTest, self).setUp() self.uuid = self._post_server() def test_post_reset_network(self): # Get api samples to reset server network request. response = self._do_post('servers/%s/action' % self.uuid, 'admin-actions-reset-network', {}) self.assertEqual(202, response.status_code) def test_post_inject_network_info(self): # Get api samples to inject network info request. response = self._do_post('servers/%s/action' % self.uuid, 'admin-actions-inject-network-info', {}) self.assertEqual(202, response.status_code) def test_post_reset_state(self): # get api samples to server reset state request. response = self._do_post('servers/%s/action' % self.uuid, 'admin-actions-reset-server-state', {}) self.assertEqual(202, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_migrate_server.py0000664000567000056710000000705113064447152030147 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import versionutils from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase): extension_name = "os-migrate-server" def _get_flags(self): f = super(MigrateServerSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.admin_actions.' 'Admin_actions') return f def setUp(self): """setUp Method for MigrateServer api samples extension This method creates the server that will be used in each tests """ super(MigrateServerSamplesJsonTest, self).setUp() self.uuid = self._post_server() @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate') def test_post_migrate(self, mock_cold_migrate): # Get api samples to migrate server request. response = self._do_post('servers/%s/action' % self.uuid, 'migrate-server', {}) self.assertEqual(202, response.status_code) def test_post_live_migrate_server(self): # Get api samples to server live migrate request. 
def fake_live_migrate(_self, context, instance, scheduler_hint, block_migration, disk_over_commit, request_spec): self.assertEqual(self.uuid, instance["uuid"]) host = scheduler_hint["host"] self.assertEqual(self.compute.host, host) self.stub_out( 'nova.conductor.manager.ComputeTaskManager._live_migrate', fake_live_migrate) def fake_get_compute(context, host): service = dict(host=host, binary='nova-compute', topic='compute', report_count=1, updated_at='foo', hypervisor_type='bar', hypervisor_version=( versionutils.convert_version_to_int('1.0')), disabled=False) return {'compute_node': [service]} self.stub_out("nova.db.service_get_by_compute_host", fake_get_compute) response = self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) self.assertEqual(202, response.status_code) class MigrateServerSamplesJsonTestV225(MigrateServerSamplesJsonTest): extension_name = "os-migrate-server" microversion = '2.25' scenarios = [('v2_25', {'api_major_version': 'v2.1'})] def test_post_migrate(self): # no changes for migrate-server pass nova-13.1.4/nova/tests/functional/api_sample_tests/test_servers.py0000664000567000056710000003204213064447152026620 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServersSampleBase(api_sample_base.ApiSampleTestBaseV21): extra_extensions_to_load = ["os-access-ips"] microversion = None def _post_server(self, use_common_server_api_samples=True): # param use_common_server_api_samples: Boolean to set whether tests use # common sample files for server post request and response. # Default is True which means _get_sample_path method will fetch the # common server sample files from 'servers' directory. # Set False if tests need to use extension specific sample files subs = { 'image_id': fake.get_valid_image_id(), 'host': self._get_host(), 'compute_endpoint': self._get_compute_endpoint(), 'versioned_compute_endpoint': self._get_vers_compute_endpoint(), 'glance_host': self._get_glance_host(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': '80fe::' } # TODO(gmann): Remove this hack once all tests using this common # _post_server method are enabled with all extension. # This is added to avoid all tests updates together. 
post_req_template = 'server-post-req' post_resp_template = 'server-post-resp' if self.all_extensions and use_common_server_api_samples: post_req_template = 'server-create-req' post_resp_template = 'server-create-resp' orig_value = self.__class__._use_common_server_api_samples orig_sample_dir = self.__class__.sample_dir try: self.__class__._use_common_server_api_samples = ( use_common_server_api_samples) response = self._do_post('servers', post_req_template, subs) status = self._verify_response(post_resp_template, subs, response, 202) return status finally: self.__class__._use_common_server_api_samples = orig_value self.__class__.sample_dir = orig_sample_dir def setUp(self): super(ServersSampleBase, self).setUp() self.api.microversion = self.microversion class ServersSampleJsonTest(ServersSampleBase): sample_dir = 'servers' microversion = None def _get_flags(self): f = super(ServersSampleBase, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') return f def test_servers_post(self): return self._post_server() def test_servers_get(self): uuid = self.test_servers_post() response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['hypervisor_hostname'] = r'[\w\.\-]+' subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_servers_list(self): uuid = self._post_server() response = self._do_get('servers') subs = {'id': uuid} self._verify_response('servers-list-resp', subs, response, 200) def test_servers_details(self): uuid = self.test_servers_post() response = self._do_get('servers/detail') subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['hypervisor_hostname'] = r'[\w\.\-]+' subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-details-resp', subs, response, 200) class ServersSampleJson29Test(ServersSampleJsonTest): microversion = '2.9' # NOTE(gmann): microversion tests do not need to run for v2 API # so defining scenarios only for v2.9 which will run the original tests # by appending '(v2_9)' in test_id. scenarios = [('v2_9', {'api_major_version': 'v2.1'})] class ServersSampleJson219Test(ServersSampleJsonTest): microversion = '2.19' sample_dir = 'servers' scenarios = [('v2_19', {'api_major_version': 'v2.1'})] def test_servers_post(self): return self._post_server(False) def test_servers_put(self): uuid = self.test_servers_post() response = self._do_put('servers/%s' % uuid, 'server-put-req', {}) subs = { 'image_id': fake.get_valid_image_id(), 'hostid': '[a-f0-9]+', 'glance_host': self._get_glance_host(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': '80fe::' } self._verify_response('server-put-resp', subs, response, 200) class ServersUpdateSampleJsonTest(ServersSampleBase): sample_dir = 'servers' # TODO(gmann): This will be removed once all API tests runs for # all extension enable. 
all_extensions = True def test_update_server(self): uuid = self._post_server() subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' response = self._do_put('servers/%s' % uuid, 'server-update-req', subs) self._verify_response('server-update-resp', subs, response, 200) class ServerSortKeysJsonTests(ServersSampleBase): sample_dir = 'servers-sort' def _get_flags(self): f = super(ServerSortKeysJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.server_sort_keys.' 'Server_sort_keys') return f def test_servers_list(self): self._post_server() response = self._do_get('servers?sort_key=display_name&sort_dir=asc') self._verify_response('server-sort-keys-list-resp', {}, response, 200) class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest): all_extensions = True sample_dir = None class ServersActionsJsonTest(ServersSampleBase): sample_dir = 'servers' def _test_server_action(self, uuid, action, req_tpl, subs=None, resp_tpl=None, code=202): subs = subs or {} subs.update({'action': action, 'glance_host': self._get_glance_host()}) response = self._do_post('servers/%s/action' % uuid, req_tpl, subs) if resp_tpl: self._verify_response(resp_tpl, subs, response, code) else: self.assertEqual(code, response.status_code) self.assertEqual("", response.content) def test_server_reboot_hard(self): uuid = self._post_server() self._test_server_action(uuid, "reboot", 'server-action-reboot', {"type": "HARD"}) def test_server_reboot_soft(self): uuid = self._post_server() self._test_server_action(uuid, "reboot", 'server-action-reboot', {"type": "SOFT"}) def test_server_rebuild(self): uuid = self._post_server() image = fake.get_valid_image_id() params = { 'uuid': image, 'name': 'foobar', 'pass': 'seekr3t', 'hostid': '[a-f0-9]+', 'access_ip_v4': '1.2.3.4', 'access_ip_v6': '80fe::', } resp = self._do_post('servers/%s/action' % uuid, 'server-action-rebuild', params) subs = params.copy() del subs['uuid'] self._verify_response('server-action-rebuild-resp', subs, resp, 202) def test_server_resize(self): self.flags(allow_resize_to_same_host=True) uuid = self._post_server() self._test_server_action(uuid, "resize", 'server-action-resize', {"id": '2', "host": self._get_host()}) return uuid def test_server_revert_resize(self): uuid = self.test_server_resize() self._test_server_action(uuid, "revertResize", 'server-action-revert-resize') def test_server_confirm_resize(self): uuid = self.test_server_resize() self._test_server_action(uuid, "confirmResize", 'server-action-confirm-resize', code=204) def test_server_create_image(self): uuid = self._post_server() self._test_server_action(uuid, 'createImage', 'server-action-create-image', {'name': 'foo-image'}) class ServersActionsJson219Test(ServersSampleBase): microversion = '2.19' sample_dir = 'servers' scenarios = [('v2_19', {'api_major_version': 'v2.1'})] def test_server_rebuild(self): uuid = self._post_server() image = fake.get_valid_image_id() params = { 'uuid': image, 'name': 'foobar', 'description': 'description of foobar', 'pass': 'seekr3t', 'hostid': '[a-f0-9]+', 'access_ip_v4': '1.2.3.4', 'access_ip_v6': '80fe::', } resp = self._do_post('servers/%s/action' % uuid, 'server-action-rebuild', params) subs = params.copy() del subs['uuid'] self._verify_response('server-action-rebuild-resp', subs, resp, 202) class ServersActionsAllJsonTest(ServersActionsJsonTest): all_extensions = True sample_dir = None class 
ServerStartStopJsonTest(ServersSampleBase): sample_dir = 'servers' def _get_flags(self): f = super(ServerStartStopJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.server_start_stop.' 'Server_start_stop') return f def _test_server_action(self, uuid, action, req_tpl): response = self._do_post('servers/%s/action' % uuid, req_tpl, {'action': action}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def test_server_start(self): uuid = self._post_server() self._test_server_action(uuid, 'os-stop', 'server-action-stop') self._test_server_action(uuid, 'os-start', 'server-action-start') def test_server_stop(self): uuid = self._post_server() self._test_server_action(uuid, 'os-stop', 'server-action-stop') class ServersSampleMultiStatusJsonTest(ServersSampleBase): sample_dir = 'servers' extra_extensions_to_load = ["os-access-ips"] def _get_flags(self): f = super(ServersSampleMultiStatusJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.legacy_v2.contrib.' 'server_list_multi_status.Server_list_multi_status') return f def test_servers_list(self): uuid = self._post_server() response = self._do_get('servers?status=active&status=error') subs = {'id': uuid} self._verify_response('servers-list-resp', subs, response, 200) class ServerTriggerCrashDumpJsonTest(ServersSampleBase): sample_dir = 'servers' microversion = '2.17' scenarios = [('v2_17', {'api_major_version': 'v2.1'})] def test_trigger_crash_dump(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, 'server-action-trigger-crash-dump', {}) self.assertEqual(response.status_code, 202) self.assertEqual(response.content, "") nova-13.1.4/nova/tests/functional/api_sample_tests/test_server_migrations.py0000664000567000056710000002071613064447152030676 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from nova.conductor import manager as conductor_manager from nova import context from nova import db from nova import objects from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit import fake_instance class ServerMigrationsSampleJsonTest(test_servers.ServersSampleBase): extension_name = 'server-migrations' scenarios = [('v2_22', {'api_major_version': 'v2.1'})] extra_extensions_to_load = ["os-migrate-server", "os-access-ips"] def setUp(self): """setUp method for server usage.""" super(ServerMigrationsSampleJsonTest, self).setUp() self.uuid = self._post_server() self.api.microversion = '2.22' @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') @mock.patch.object(db, 'service_get_by_compute_host') @mock.patch.object(objects.Migration, 'get_by_id_and_instance') @mock.patch('nova.compute.manager.ComputeManager.' 
'live_migration_force_complete') def test_live_migrate_force_complete(self, live_migration_pause_instance, get_by_id_and_instance, service_get_by_compute_host, _live_migrate): migration = objects.Migration() migration.id = 1 migration.status = 'running' get_by_id_and_instance.return_value = migration self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) response = self._do_post('servers/%s/migrations/%s/action' % (self.uuid, '3'), 'force_complete', {}) self.assertEqual(202, response.status_code) def test_get_migration(self): response = self._do_get('servers/fake_id/migrations/1234') self.assertEqual(404, response.status_code) def test_list_migrations(self): response = self._do_get('servers/fake_id/migrations') self.assertEqual(404, response.status_code) class ServerMigrationsSamplesJsonTestV2_23(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "server-migrations" microversion = '2.23' scenarios = [('v2_23', {'api_major_version': 'v2.1'})] UUID_1 = '4cfba335-03d8-49b2-8c52-e69043d1e8fe' UUID_2 = '058fc419-a8a8-4e08-b62c-a9841ef9cd3f' fake_migrations = [ { 'source_node': 'node1', 'dest_node': 'node2', 'source_compute': 'compute1', 'dest_compute': 'compute2', 'dest_host': '1.2.3.4', 'status': 'running', 'instance_uuid': UUID_1, 'migration_type': 'live-migration', 'hidden': False, 'memory_total': 123456, 'memory_processed': 12345, 'memory_remaining': 120000, 'disk_total': 234567, 'disk_processed': 23456, 'disk_remaining': 230000, 'created_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'deleted_at': None, 'deleted': False }, { 'source_node': 'node10', 'dest_node': 'node20', 'source_compute': 'compute10', 'dest_compute': 'compute20', 'dest_host': '5.6.7.8', 'status': 'migrating', 'instance_uuid': UUID_2, 'migration_type': 'resize', 'hidden': False, 'memory_total': 456789, 'memory_processed': 56789, 'memory_remaining': 45000, 'disk_total': 96789, 'disk_processed': 6789, 'disk_remaining': 96000, 'created_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'deleted_at': None, 'deleted': False } ] def setUp(self): super(ServerMigrationsSamplesJsonTestV2_23, self).setUp() fake_context = context.RequestContext('fake', 'fake') self.mig1 = objects.Migration( context=fake_context, **self.fake_migrations[0]) self.mig1.create() self.mig2 = objects.Migration( context=fake_context, **self.fake_migrations[1]) self.mig2.create() fake_ins = fake_instance.fake_db_instance(uuid=self.UUID_1) fake_ins.pop("pci_devices") fake_ins.pop("security_groups") fake_ins.pop("services") fake_ins.pop("tags") fake_ins.pop("info_cache") fake_ins.pop("id") self.instance = objects.Instance( context=fake_context, **fake_ins) self.instance.create() def test_get_migration(self): response = self._do_get('servers/%s/migrations/%s' % (self.fake_migrations[0]["instance_uuid"], self.mig1.id)) self.assertEqual(200, response.status_code) self._verify_response('migrations-get', {"server_uuid": self.UUID_1}, response, 200) def test_list_migrations(self): response = self._do_get('servers/%s/migrations' % self.fake_migrations[0]["instance_uuid"]) self.assertEqual(200, response.status_code) self._verify_response('migrations-index', {"server_uuid_1": self.UUID_1}, response, 200) class ServerMigrationsSampleJsonTestV2_24(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "server-migrations" scenarios = [('v2_24', {'api_major_version': 'v2.1'})] 
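# Microversion 2.24 adds DELETE on servers/<uuid>/migrations/<id> to abort an
# in-progress live migration. setUp() below creates a running Migration record
# directly, and the tests cover the 202 (accepted), 404 (unknown migration id)
# and 400 (migration no longer running) responses.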
extra_extensions_to_load = ["os-migrate-server", "os-access-ips"] def setUp(self): """setUp method for server usage.""" super(ServerMigrationsSampleJsonTestV2_24, self).setUp() self.api.microversion = '2.24' self.uuid = self._post_server() self.context = context.RequestContext('fake', 'fake') fake_migration = { 'source_node': self.compute.host, 'dest_node': 'node10', 'source_compute': 'compute1', 'dest_compute': 'compute12', 'migration_type': 'live-migration', 'instance_uuid': self.uuid, 'status': 'running'} self.migration = objects.Migration(context=self.context, **fake_migration) self.migration.create() @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') def test_live_migrate_abort(self, _live_migrate): self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) uri = 'servers/%s/migrations/%s' % (self.uuid, self.migration.id) response = self._do_delete(uri) self.assertEqual(202, response.status_code) @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') def test_live_migrate_abort_migration_not_found(self, _live_migrate): self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) uri = 'servers/%s/migrations/%s' % (self.uuid, '45') response = self._do_delete(uri) self.assertEqual(404, response.status_code) @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') def test_live_migrate_abort_migration_not_running(self, _live_migrate): self.migration.status = 'completed' self.migration.save() self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) uri = 'servers/%s/migrations/%s' % (self.uuid, self.migration.id) response = self._do_delete(uri) self.assertEqual(400, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/README.rst0000664000567000056710000000166413064447140025210 0ustar jenkinsjenkins00000000000000Api Samples =========== This part of the tree contains templates for API samples. The documentation in doc/api_samples is completely autogenerated from the tests in this directory. To add a new api sample, add tests for the common passing and failing cases in this directory for your extension, and modify test_samples.py for your tests. Then run the following command: tox -e api-samples Which will create the files on doc/api_samples. If new tests are added or the .tpl files are changed due to bug fixes, the samples must be regenerated so they are in sync with the templates, as there is an additional test which reloads the documentation and ensures that it's in sync. Debugging sample generation --------------------------- If a .tpl is changed, its matching .json must be removed else the samples won't be generated. If an entirely new extension is added, a directory for it must be created before its samples will be generated. nova-13.1.4/nova/tests/functional/api_sample_tests/test_flavors.py0000664000567000056710000000453713064447152026613 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FlavorsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): sample_dir = 'flavors' def _get_flags(self): f = super(FlavorsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_swap.Flavor_swap') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.flavor_disabled.Flavor_disabled') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.flavor_access.Flavor_access') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.flavorextradata.Flavorextradata') return f def test_flavors_get(self): response = self._do_get('flavors/1') self._verify_response('flavor-get-resp', {}, response, 200) def test_flavors_list(self): response = self._do_get('flavors') self._verify_response('flavors-list-resp', {}, response, 200) def test_flavors_detail(self): response = self._do_get('flavors/detail') self._verify_response('flavors-detail-resp', {}, response, 200) class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest): all_extensions = True sample_dir = None def _get_flags(self): f = super(FlavorsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] return f nova-13.1.4/nova/tests/functional/api_sample_tests/test_rescue.py0000664000567000056710000001000313064447152026406 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class RescueJsonTest(test_servers.ServersSampleBase): extension_name = "os-rescue" def _get_flags(self): f = super(RescueJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.rescue.Rescue') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_rescue_with_image.' 'Extended_rescue_with_image') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 
'Extended_ips_mac') return f def _rescue(self, uuid): req_subs = { 'password': 'MySecretPass' } response = self._do_post('servers/%s/action' % uuid, 'server-rescue-req', req_subs) self._verify_response('server-rescue', req_subs, response, 200) def _unrescue(self, uuid): response = self._do_post('servers/%s/action' % uuid, 'server-unrescue-req', {}) self.assertEqual(202, response.status_code) def test_server_rescue(self): uuid = self._post_server() self._rescue(uuid) # Do a server get to make sure that the 'RESCUE' state is set response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['status'] = 'RESCUE' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp-rescue', subs, response, 200) def test_server_rescue_with_image_ref_specified(self): uuid = self._post_server() req_subs = { 'password': 'MySecretPass', 'image_ref': '2341-Abc' } response = self._do_post('servers/%s/action' % uuid, 'server-rescue-req-with-image-ref', req_subs) self._verify_response('server-rescue', req_subs, response, 200) # Do a server get to make sure that the 'RESCUE' state is set response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['status'] = 'RESCUE' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp-rescue', subs, response, 200) def test_server_unrescue(self): uuid = self._post_server() self._rescue(uuid) self._unrescue(uuid) # Do a server get to make sure that the 'ACTIVE' state is back response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid subs['status'] = 'ACTIVE' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp-unrescue', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_floating_ip_pools.py0000664000567000056710000000336713064447152030646 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FloatingIPPoolsSampleTests(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-floating-ip-pools" def _get_flags(self): f = super(FloatingIPPoolsSampleTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 
'contrib.floating_ip_pools.Floating_ip_pools') return f def test_list_floatingippools(self): pool_list = ["pool1", "pool2"] def fake_get_floating_ip_pools(self, context): return pool_list self.stub_out("nova.network.api.API.get_floating_ip_pools", fake_get_floating_ip_pools) response = self._do_get('os-floating-ip-pools') subs = { 'pool1': pool_list[0], 'pool2': pool_list[1] } self._verify_response('floatingippools-list-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_flavor_access.py0000664000567000056710000001035713064447152027746 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = 'flavor-access' def _get_flags(self): f = super(FlavorAccessSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 'flavor_access.Flavor_access') # FlavorAccess extension also needs Flavormanage to be loaded. f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 'flavormanage.Flavormanage') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 'flavor_disabled.Flavor_disabled') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 'flavorextradata.Flavorextradata') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 
'flavor_swap.Flavor_swap') return f def _add_tenant(self): subs = { 'tenant_id': 'fake_tenant', 'flavor_id': '10', } response = self._do_post('flavors/10/action', 'flavor-access-add-tenant-req', subs) self._verify_response('flavor-access-add-tenant-resp', subs, response, 200) def _create_flavor(self): subs = { 'flavor_id': '10', 'flavor_name': 'test_flavor' } response = self._do_post("flavors", "flavor-access-create-req", subs) self._verify_response("flavor-access-create-resp", subs, response, 200) # TODO(sdague): remove tests that are duplicative def test_flavor_access_create(self): self._create_flavor() def test_flavor_access_detail(self): response = self._do_get('flavors/detail') self._verify_response('flavor-access-detail-resp', {}, response, 200) def test_flavor_access_list(self): self._create_flavor() self._add_tenant() flavor_id = '10' response = self._do_get('flavors/%s/os-flavor-access' % flavor_id) subs = { 'flavor_id': flavor_id, 'tenant_id': 'fake_tenant', } self._verify_response('flavor-access-list-resp', subs, response, 200) def test_flavor_access_show(self): flavor_id = '1' response = self._do_get('flavors/%s' % flavor_id) subs = { 'flavor_id': flavor_id } self._verify_response('flavor-access-show-resp', subs, response, 200) def test_flavor_access_add_tenant(self): self._create_flavor() self._add_tenant() def test_flavor_access_remove_tenant(self): self._create_flavor() self._add_tenant() subs = { 'tenant_id': 'fake_tenant', } response = self._do_post('flavors/10/action', "flavor-access-remove-tenant-req", subs) exp_subs = { "tenant_id": self.api.project_id, "flavor_id": "10" } self._verify_response('flavor-access-remove-tenant-resp', exp_subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_disk_config.py0000664000567000056710000000667113064447152027417 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class DiskConfigJsonTest(test_servers.ServersSampleBase): extension_name = 'os-disk-config' extra_extensions_to_load = ["os-access-ips"] def _get_flags(self): f = super(DiskConfigJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.disk_config.' 
'Disk_config') return f def test_list_servers_detail(self): uuid = self._post_server(use_common_server_api_samples=False) response = self._do_get('servers/detail') subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '' subs['access_ip_v6'] = '' subs['id'] = uuid self._verify_response('list-servers-detail-get', subs, response, 200) def test_get_server(self): uuid = self._post_server(use_common_server_api_samples=False) response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '' subs['access_ip_v6'] = '' self._verify_response('server-get-resp', subs, response, 200) def test_resize_server(self): self.flags(allow_resize_to_same_host=True) uuid = self._post_server(use_common_server_api_samples=False) response = self._do_post('servers/%s/action' % uuid, 'server-resize-post-req', {}) self.assertEqual(202, response.status_code) # NOTE(tmello): Resize does not return response body # Bug #1085213. self.assertEqual("", response.content) def test_rebuild_server(self): uuid = self._post_server(use_common_server_api_samples=False) subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), } response = self._do_post('servers/%s/action' % uuid, 'server-action-rebuild-req', subs) subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '' subs['access_ip_v6'] = '' self._verify_response('server-action-rebuild-resp', subs, response, 202) nova-13.1.4/nova/tests/functional/api_sample_tests/test_quota_classes.py0000664000567000056710000000376613064447152030010 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class QuotaClassesSampleJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-quota-class-sets" set_id = 'test_class' def _get_flags(self): f = super(QuotaClassesSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.quota_classes.' 'Quota_classes') return f def test_show_quota_classes(self): # Get api sample to show quota classes. response = self._do_get('os-quota-class-sets/%s' % self.set_id) subs = {'set_id': self.set_id} self._verify_response('quota-classes-show-get-resp', subs, response, 200) def test_update_quota_classes(self): # Get api sample to update quota classes. response = self._do_put('os-quota-class-sets/%s' % self.set_id, 'quota-classes-update-post-req', {}) self._verify_response('quota-classes-update-post-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_certificates.py0000664000567000056710000000413113064447152027572 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit import fake_crypto CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class CertificatesSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-certificates" def _get_flags(self): f = super(CertificatesSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.certificates.Certificates') return f def setUp(self): super(CertificatesSamplesJsonTest, self).setUp() self.stub_out('nova.crypto.ensure_ca_filesystem', fake_crypto.ensure_ca_filesystem) self.stub_out('nova.crypto.fetch_ca', fake_crypto.fetch_ca) self.stub_out('nova.crypto.generate_x509_cert', fake_crypto.generate_x509_cert) self.cert = self.start_service('cert') def test_create_certificates(self): response = self._do_post('os-certificates', 'certificate-create-req', {}) self._verify_response('certificate-create-resp', {}, response, 200) def test_get_root_certificate(self): response = self._do_get('os-certificates/root') self._verify_response('certificate-get-root-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_images.py0000664000567000056710000000676213064447152026406 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): sample_dir = 'images' # TODO(gmann): This will be removed once all API tests runs for # all extension enable. all_extensions = True def test_images_list(self): # Get api sample of images get list request. response = self._do_get('images') self._verify_response('images-list-get-resp', {}, response, 200) def test_image_get(self): # Get api sample of one single image details request. image_id = fake.get_valid_image_id() response = self._do_get('images/%s' % image_id) subs = {'image_id': image_id} self._verify_response('image-get-resp', subs, response, 200) def test_images_details(self): # Get api sample of all images details request. 
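# No substitutions are passed to _verify_response() here: the image ids and
# metadata in the stored sample come straight from the fake image service
# fixtures.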
response = self._do_get('images/detail') self._verify_response('images-details-get-resp', {}, response, 200) def test_image_metadata_get(self): # Get api sample of an image metadata request. image_id = fake.get_valid_image_id() response = self._do_get('images/%s/metadata' % image_id) subs = {'image_id': image_id} self._verify_response('image-metadata-get-resp', subs, response, 200) def test_image_metadata_post(self): # Get api sample to update metadata of an image metadata request. image_id = fake.get_valid_image_id() response = self._do_post( 'images/%s/metadata' % image_id, 'image-metadata-post-req', {}) self._verify_response('image-metadata-post-resp', {}, response, 200) def test_image_metadata_put(self): # Get api sample of image metadata put request. image_id = fake.get_valid_image_id() response = self._do_put('images/%s/metadata' % (image_id), 'image-metadata-put-req', {}) self._verify_response('image-metadata-put-resp', {}, response, 200) def test_image_meta_key_get(self): # Get api sample of an image metadata key request. image_id = fake.get_valid_image_id() key = "kernel_id" response = self._do_get('images/%s/metadata/%s' % (image_id, key)) self._verify_response('image-meta-key-get', {}, response, 200) def test_image_meta_key_put(self): # Get api sample of image metadata key put request. image_id = fake.get_valid_image_id() key = "auto_disk_config" response = self._do_put('images/%s/metadata/%s' % (image_id, key), 'image-meta-key-put-req', {}) self._verify_response('image-meta-key-put-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_console_output.py0000664000567000056710000000303013064447152030204 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ConsoleOutputSampleJsonTest(test_servers.ServersSampleBase): extension_name = "os-console-output" def _get_flags(self): f = super(ConsoleOutputSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.console_output.' 'Console_output') return f def test_get_console_output(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, 'console-output-post-req', {}) self._verify_response('console-output-post-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_server_groups.py0000664000567000056710000000622013064447152030033 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServerGroupsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-server-groups" def _get_flags(self): f = super(ServerGroupsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.server_groups.Server_groups') return f def _get_create_subs(self): return {'name': 'test'} def _post_server_group(self): """Verify the response status and returns the UUID of the newly created server group. """ subs = self._get_create_subs() response = self._do_post('os-server-groups', 'server-groups-post-req', subs) subs = {} subs['name'] = 'test' return self._verify_response('server-groups-post-resp', subs, response, 200) def _create_server_group(self): subs = self._get_create_subs() return self._do_post('os-server-groups', 'server-groups-post-req', subs) def test_server_groups_post(self): return self._post_server_group() def test_server_groups_list(self): subs = self._get_create_subs() uuid = self._post_server_group() response = self._do_get('os-server-groups') subs['id'] = uuid self._verify_response('server-groups-list-resp', subs, response, 200) def test_server_groups_get(self): # Get api sample of server groups get request. subs = {'name': 'test'} uuid = self._post_server_group() subs['id'] = uuid response = self._do_get('os-server-groups/%s' % uuid) self._verify_response('server-groups-get-resp', subs, response, 200) def test_server_groups_delete(self): uuid = self._post_server_group() response = self._do_delete('os-server-groups/%s' % uuid) self.assertEqual(204, response.status_code) class ServerGroupsV213SampleJsonTest(ServerGroupsSampleJsonTest): scenarios = [ ("v2_13", {'api_major_version': 'v2.1', 'microversion': '2.13'}) ] def setUp(self): super(ServerGroupsV213SampleJsonTest, self).setUp() self.api.microversion = self.microversion nova-13.1.4/nova/tests/functional/api_sample_tests/test_admin_password.py0000664000567000056710000000246113064447152030143 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
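# Sample test for the os-admin-password extension: changing a server's admin
# password is a POST to servers/<uuid>/action that returns 202 with an empty
# body, so the test asserts the status code and empty content rather than
# verifying a response template.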
from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AdminPasswordJsonTest(test_servers.ServersSampleBase): extension_name = 'os-admin-password' def test_server_password(self): uuid = self._post_server() subs = {"password": "foo"} response = self._do_post('servers/%s/action' % uuid, 'admin-password-change-password', subs) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_server_metadata.py0000664000567000056710000000655613064447152030310 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServersMetadataJsonTest(test_servers.ServersSampleBase): sample_dir = 'server-metadata' def _create_and_set(self, subs): uuid = self._post_server() response = self._do_put('/servers/%s/metadata' % uuid, 'server-metadata-all-req', subs) self._verify_response('server-metadata-all-resp', subs, response, 200) return uuid def generalize_subs(self, subs, vanilla_regexes): subs['value'] = '(Foo|Bar) Value' return subs def test_metadata_put_all(self): # Test setting all metadata for a server. subs = {'value': 'Foo Value'} self._create_and_set(subs) def test_metadata_post_all(self): # Test updating all metadata for a server. subs = {'value': 'Foo Value'} uuid = self._create_and_set(subs) subs['value'] = 'Bar Value' response = self._do_post('servers/%s/metadata' % uuid, 'server-metadata-all-req', subs) self._verify_response('server-metadata-all-resp', subs, response, 200) def test_metadata_get_all(self): # Test getting all metadata for a server. subs = {'value': 'Foo Value'} uuid = self._create_and_set(subs) response = self._do_get('servers/%s/metadata' % uuid) self._verify_response('server-metadata-all-resp', subs, response, 200) def test_metadata_put(self): # Test putting an individual metadata item for a server. subs = {'value': 'Foo Value'} uuid = self._create_and_set(subs) subs['value'] = 'Bar Value' response = self._do_put('servers/%s/metadata/foo' % uuid, 'server-metadata-req', subs) self._verify_response('server-metadata-resp', subs, response, 200) def test_metadata_get(self): # Test getting an individual metadata item for a server. subs = {'value': 'Foo Value'} uuid = self._create_and_set(subs) response = self._do_get('servers/%s/metadata/foo' % uuid) self._verify_response('server-metadata-resp', subs, response, 200) def test_metadata_delete(self): # Test deleting an individual metadata item for a server. 
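# DELETE of a metadata key returns 204 with an empty body, so there is no
# response template to verify; the item is created first via _create_and_set()
# so the delete has something to remove.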
subs = {'value': 'Foo Value'} uuid = self._create_and_set(subs) response = self._do_delete('servers/%s/metadata/foo' % uuid) self.assertEqual(204, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_block_device_mapping_boot.py0000664000567000056710000000336313064447152032302 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.api.openstack import fakes CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class BlockDeviceMappingV1BootJsonTest(test_servers.ServersSampleBase): extension_name = "os-block-device-mapping-v1" def test_servers_post_with_bdm(self): self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get) self.stub_out('nova.volume.cinder.API.check_attach', fakes.stub_volume_check_attach) return self._post_server() class BlockDeviceMappingV2BootJsonTest(BlockDeviceMappingV1BootJsonTest): extension_name = "os-block-device-mapping" def _get_flags(self): f = super(BlockDeviceMappingV2BootJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.legacy_v2.contrib.' 'block_device_mapping_v2_boot.Block_device_mapping_v2_boot') return f nova-13.1.4/nova/tests/functional/api_sample_tests/test_server_password.py0000664000567000056710000000463513064447152030366 0ustar jenkinsjenkins00000000000000# Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServerPasswordSampleJsonTests(test_servers.ServersSampleBase): extension_name = "os-server-password" def _get_flags(self): f = super(ServerPasswordSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.server_password.' 
'Server_password') return f @mock.patch("nova.api.metadata.password.extract_password") def test_get_password(self, mock_extract_password): password = ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/" "Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp" "28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV" "VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX" "JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj" "QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac" "X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==") # Mock password since there is no api to set it mock_extract_password.return_value = password uuid = self._post_server() response = self._do_get('servers/%s/os-server-password' % uuid) subs = {'encrypted_password': password.replace('+', '\\+')} self._verify_response('get-password-resp', subs, response, 200) def test_reset_password(self): uuid = self._post_server() response = self._do_delete('servers/%s/os-server-password' % uuid) self.assertEqual(204, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_flavor_rxtx.py0000664000567000056710000000535113064447152027510 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FlavorRxtxJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = 'os-flavor-rxtx' def _get_flags(self): f = super(FlavorRxtxJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_rxtx.' 'Flavor_rxtx') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavormanage.' 'Flavormanage') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_disabled.' 'Flavor_disabled') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_access.' 'Flavor_access') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavorextradata.' 'Flavorextradata') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.flavor_swap.' 
'Flavor_swap') return f def test_flavor_rxtx_get(self): flavor_id = '1' response = self._do_get('flavors/%s' % flavor_id) subs = { 'flavor_id': flavor_id, 'flavor_name': 'm1.tiny' } self._verify_response('flavor-rxtx-get-resp', subs, response, 200) def test_flavors_rxtx_detail(self): response = self._do_get('flavors/detail') self._verify_response('flavor-rxtx-list-resp', {}, response, 200) def test_flavors_rxtx_create(self): subs = { 'flavor_id': '100', 'flavor_name': 'flavortest' } response = self._do_post('flavors', 'flavor-rxtx-post-req', subs) self._verify_response('flavor-rxtx-post-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_fixed_ips.py0000664000567000056710000001132713064447152027104 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova import exception from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.objects import test_network from nova.tests.unit import utils as test_utils CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FixedIpTest(test_servers.ServersSampleBase): extension_name = "os-fixed-ips" microversion = None def _get_flags(self): f = super(FixedIpTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips') return f def setUp(self): super(FixedIpTest, self).setUp() self.api.microversion = self.microversion instance = dict(test_utils.get_test_instance(), hostname='compute.host.pvt', host='host') fake_fixed_ips = [{'id': 1, 'address': '192.168.1.1', 'network_id': 1, 'virtual_interface_id': 1, 'instance_uuid': '1', 'allocated': False, 'leased': False, 'reserved': False, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'deleted': None, 'instance': instance, 'network': test_network.fake_network, 'host': None}, {'id': 2, 'address': '192.168.1.2', 'network_id': 1, 'virtual_interface_id': 2, 'instance_uuid': '2', 'allocated': False, 'leased': False, 'reserved': False, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'deleted': None, 'instance': instance, 'network': test_network.fake_network, 'host': None}, ] def fake_fixed_ip_get_by_address(context, address, columns_to_join=None): for fixed_ip in fake_fixed_ips: if fixed_ip['address'] == address: return fixed_ip raise exception.FixedIpNotFoundForAddress(address=address) def fake_fixed_ip_update(context, address, values): fixed_ip = fake_fixed_ip_get_by_address(context, address) if fixed_ip is None: raise exception.FixedIpNotFoundForAddress(address=address) else: for key in values: fixed_ip[key] = values[key] self.stub_out("nova.db.fixed_ip_get_by_address", fake_fixed_ip_get_by_address) self.stub_out("nova.db.fixed_ip_update", fake_fixed_ip_update) def test_fixed_ip_reserve(self): # Reserve a Fixed IP. 
response = self._do_post('os-fixed-ips/192.168.1.1/action', 'fixedip-post-req', {}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def _test_get_fixed_ip(self, **kwargs): # Return data about the given fixed ip. response = self._do_get('os-fixed-ips/192.168.1.1') project = {'cidr': '192.168.1.0/24', 'hostname': 'compute.host.pvt', 'host': 'host', 'address': '192.168.1.1'} project.update(**kwargs) self._verify_response('fixedips-get-resp', project, response, 200) def test_get_fixed_ip(self): self._test_get_fixed_ip() class FixedIpV24Test(FixedIpTest): microversion = '2.4' # NOTE(gmann): microversion tests do not need to run for v2 API # so defining scenarios only for v2.4 which will run the original tests # by appending '(v2_4)' in test_id. scenarios = [('v2_4', {'api_major_version': 'v2.1'})] def test_get_fixed_ip(self): self._test_get_fixed_ip(reserved='False') nova-13.1.4/nova/tests/functional/api_sample_tests/test_multinic.py0000664000567000056710000000447013064447152026757 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class MultinicSampleJsonTest(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "os-multinic" def _get_flags(self): f = super(MultinicSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.multinic.Multinic') return f def _disable_instance_dns_manager(self): # NOTE(markmc): it looks like multinic and instance_dns_manager are # incompatible. See: # https://bugs.launchpad.net/nova/+bug/1213251 self.flags( instance_dns_manager='nova.network.noop_dns_driver.NoopDNSDriver') def setUp(self): self._disable_instance_dns_manager() super(MultinicSampleJsonTest, self).setUp() self.uuid = self._post_server() def _add_fixed_ip(self): subs = {"networkId": '1'} response = self._do_post('servers/%s/action' % (self.uuid), 'multinic-add-fixed-ip-req', subs) self.assertEqual(202, response.status_code) def test_add_fixed_ip(self): self._add_fixed_ip() def test_remove_fixed_ip(self): self._add_fixed_ip() subs = {"ip": "10.0.0.4"} response = self._do_post('servers/%s/action' % (self.uuid), 'multinic-remove-fixed-ip-req', subs) self.assertEqual(202, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_agents.py0000664000567000056710000001023713064447152026412 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.db.sqlalchemy import models from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-agents" def _get_flags(self): f = super(AgentsJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.agents.Agents') return f def setUp(self): super(AgentsJsonTest, self).setUp() fake_agents_list = [{'url': 'http://example.com/path/to/resource', 'hypervisor': 'hypervisor', 'architecture': 'x86', 'os': 'os', 'version': '8.0', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'id': 1}] def fake_agent_build_create(context, values): values['id'] = 1 agent_build_ref = models.AgentBuild() agent_build_ref.update(values) return agent_build_ref def fake_agent_build_get_all(context, hypervisor): agent_build_all = [] for agent in fake_agents_list: if hypervisor and hypervisor != agent['hypervisor']: continue agent_build_ref = models.AgentBuild() agent_build_ref.update(agent) agent_build_all.append(agent_build_ref) return agent_build_all def fake_agent_build_update(context, agent_build_id, values): pass def fake_agent_build_destroy(context, agent_update_id): pass self.stub_out("nova.db.agent_build_create", fake_agent_build_create) self.stub_out("nova.db.agent_build_get_all", fake_agent_build_get_all) self.stub_out("nova.db.agent_build_update", fake_agent_build_update) self.stub_out("nova.db.agent_build_destroy", fake_agent_build_destroy) def test_agent_create(self): # Creates a new agent build. project = {'url': 'http://example.com/path/to/resource', 'hypervisor': 'hypervisor', 'architecture': 'x86', 'os': 'os', 'version': '8.0', 'md5hash': 'add6bb58e139be103324d04d82d8f545' } response = self._do_post('os-agents', 'agent-post-req', project) self._verify_response('agent-post-resp', project, response, 200) def test_agent_list(self): # Return a list of all agent builds. response = self._do_get('os-agents') self._verify_response('agents-get-resp', {}, response, 200) def test_agent_update(self): # Update an existing agent build. agent_id = 1 subs = {'version': '7.0', 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'} response = self._do_put('os-agents/%s' % agent_id, 'agent-update-put-req', subs) self._verify_response('agent-update-put-resp', subs, response, 200) def test_agent_delete(self): # Deletes an existing agent build. agent_id = 1 response = self._do_delete('os-agents/%s' % agent_id) self.assertEqual(200, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_floating_ip_dns.py0000664000567000056710000001024013064447152030262 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FloatingIpDNSTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-floating-ip-dns" domain = 'domain1.example.org' name = 'instance1' scope = 'public' project = 'project1' dns_type = 'A' ip = '192.168.1.1' def _get_flags(self): f = super(FloatingIpDNSTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.floating_ip_dns.Floating_ip_dns') return f def _create_or_update(self): subs = {'project': self.project, 'scope': self.scope} response = self._do_put('os-floating-ip-dns/%s' % self.domain, 'floating-ip-dns-create-or-update-req', subs) subs.update({'domain': self.domain}) self._verify_response('floating-ip-dns-create-or-update-resp', subs, response, 200) def _create_or_update_entry(self): subs = {'ip': self.ip, 'dns_type': self.dns_type} response = self._do_put('os-floating-ip-dns/%s/entries/%s' % (self.domain, self.name), 'floating-ip-dns-create-or-update-entry-req', subs) subs.update({'name': self.name, 'domain': self.domain}) self._verify_response('floating-ip-dns-create-or-update-entry-resp', subs, response, 200) def test_floating_ip_dns_list(self): self._create_or_update() response = self._do_get('os-floating-ip-dns') subs = {'domain': self.domain, 'project': self.project, 'scope': self.scope} self._verify_response('floating-ip-dns-list-resp', subs, response, 200) def test_floating_ip_dns_create_or_update(self): self._create_or_update() def test_floating_ip_dns_delete(self): self._create_or_update() response = self._do_delete('os-floating-ip-dns/%s' % self.domain) self.assertEqual(202, response.status_code) def test_floating_ip_dns_create_or_update_entry(self): self._create_or_update_entry() def test_floating_ip_dns_entry_get(self): self._create_or_update_entry() response = self._do_get('os-floating-ip-dns/%s/entries/%s' % (self.domain, self.name)) subs = {'domain': self.domain, 'ip': self.ip, 'name': self.name} self._verify_response('floating-ip-dns-entry-get-resp', subs, response, 200) def test_floating_ip_dns_entry_delete(self): self._create_or_update_entry() response = self._do_delete('os-floating-ip-dns/%s/entries/%s' % (self.domain, self.name)) self.assertEqual(202, response.status_code) def test_floating_ip_dns_entry_list(self): self._create_or_update_entry() response = self._do_get('os-floating-ip-dns/%s/entries/%s' % (self.domain, self.ip)) subs = {'domain': self.domain, 'ip': self.ip, 'name': self.name} self._verify_response('floating-ip-dns-entry-list-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_virtual_interfaces.py0000664000567000056710000000437613064447152031031 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class VirtualInterfacesJsonTest(test_servers.ServersSampleBase): extension_name = "os-virtual-interfaces" def setUp(self): super(VirtualInterfacesJsonTest, self).setUp() self.template = 'vifs-list-resp' if self.api_major_version == 'v2': self.template = 'vifs-list-resp-v2' def _get_flags(self): f = super(VirtualInterfacesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.legacy_v2.contrib.' 'virtual_interfaces.Virtual_interfaces') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.legacy_v2.contrib.' 'extended_virtual_interfaces_net.Extended_virtual_interfaces_net') return f def test_vifs_list(self): uuid = self._post_server() response = self._do_get('servers/%s/os-virtual-interfaces' % uuid) subs = {'mac_addr': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'} self._verify_response(self.template, subs, response, 200) class VirtualInterfacesJsonV212Test(VirtualInterfacesJsonTest): microversion = '2.12' # NOTE(gmann): microversion tests do not need to run for v2 API # so defining scenarios only for v2.12 which will run the original tests # by appending '(v2_12)' in test_id. scenarios = [('v2_12', {'api_major_version': 'v2.1'})] nova-13.1.4/nova/tests/functional/api_sample_tests/test_preserve_ephemeral_rebuild.py0000664000567000056710000000616213064447152032516 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.compute import api as compute_api from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class PreserveEphemeralOnRebuildJsonTest(test_servers.ServersSampleBase): extension_name = 'os-preserve-ephemeral-rebuild' def _get_flags(self): f = super(PreserveEphemeralOnRebuildJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.preserve_ephemeral_rebuild.' 
'Preserve_ephemeral_rebuild') return f def _test_server_rebuild_preserve_ephemeral(self, value, resp_tpl=None): uuid = self._post_server() image = fake.get_valid_image_id() subs = {'host': self._get_host(), 'uuid': image, 'name': 'foobar', 'pass': 'seekr3t', 'hostid': '[a-f0-9]+', 'preserve_ephemeral': str(value).lower(), 'action': 'rebuild', 'glance_host': self._get_glance_host(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': '80fe::' } old_rebuild = compute_api.API.rebuild def fake_rebuild(self_, context, instance, image_href, admin_password, files_to_inject=None, **kwargs): self.assertEqual(kwargs['preserve_ephemeral'], value) if resp_tpl: return old_rebuild(self_, context, instance, image_href, admin_password, files_to_inject=None, **kwargs) self.stub_out('nova.compute.api.API.rebuild', fake_rebuild) response = self._do_post('servers/%s/action' % uuid, 'server-action-rebuild-preserve-ephemeral', subs) if resp_tpl: del subs['uuid'] self._verify_response(resp_tpl, subs, response, 202) else: self.assertEqual(202, response.status_code) def test_server_rebuild_preserve_ephemeral_true(self): self._test_server_rebuild_preserve_ephemeral(True) def test_server_rebuild_preserve_ephemeral_false(self): self._test_server_rebuild_preserve_ephemeral(False, resp_tpl='server-action-rebuild-preserve-ephemeral-resp') nova-13.1.4/nova/tests/functional/api_sample_tests/test_networks_associate.py0000664000567000056710000000611513064447152031040 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-networks-associate" extra_extensions_to_load = ["os-networks"] _sentinel = object() def _get_flags(self): f = super(NetworksAssociateJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] # Networks_associate requires Networks to be update f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.os_networks.Os_networks') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.networks_associate.' 
'Networks_associate') return f def setUp(self): super(NetworksAssociateJsonTests, self).setUp() def fake_associate(self, context, network_id, host=NetworksAssociateJsonTests._sentinel, project=NetworksAssociateJsonTests._sentinel): return True self.stub_out("nova.network.api.API.associate", fake_associate) def test_disassociate(self): response = self._do_post('os-networks/1/action', 'network-disassociate-req', {}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def test_disassociate_host(self): response = self._do_post('os-networks/1/action', 'network-disassociate-host-req', {}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def test_disassociate_project(self): response = self._do_post('os-networks/1/action', 'network-disassociate-project-req', {}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) def test_associate_host(self): response = self._do_post('os-networks/1/action', 'network-associate-host-req', {"host": "testHost"}) self.assertEqual(202, response.status_code) self.assertEqual("", response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_access_ips.py0000664000567000056710000000710413064447152027244 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AccessIPsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): extension_name = 'os-access-ips' def _get_flags(self): f = super(AccessIPsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 
'Extended_ips_mac') return f def _servers_post(self, subs): response = self._do_post('servers', 'server-post-req', subs) return self._verify_response('server-post-resp', subs, response, 202) def test_servers_post(self): subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': 'fe80::' } self._servers_post(subs) def test_servers_get(self): subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': 'fe80::' } uuid = self._servers_post(subs) response = self._do_get('servers/%s' % uuid) subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid self._verify_response('server-get-resp', subs, response, 200) def test_servers_details(self): subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': 'fe80::' } uuid = self._servers_post(subs) response = self._do_get('servers/detail') subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid self._verify_response('servers-details-resp', subs, response, 200) def test_servers_rebuild(self): subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': 'fe80::' } uuid = self._servers_post(subs) subs['access_ip_v4'] = "4.3.2.1" subs['access_ip_v6'] = '80fe::' response = self._do_post('servers/%s/action' % uuid, 'server-action-rebuild', subs) subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid self._verify_response('server-action-rebuild-resp', subs, response, 202) nova-13.1.4/nova/tests/functional/api_sample_tests/test_limits.py0000664000567000056710000000337113064447152026433 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class LimitsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "limits" def setUp(self): super(LimitsSampleJsonTest, self).setUp() # NOTE(gmann): We have to separate the template files between V2 # and V2.1 as the response are different. self.template = 'limit-get-resp' if self._legacy_v2_code: self.template = 'v2-limit-get-resp' def _get_flags(self): f = super(LimitsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append("nova.api.openstack.compute." "legacy_v2.contrib.server_group_quotas." "Server_group_quotas") return f def test_limits_get(self): response = self._do_get('limits') self._verify_response(self.template, {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_assisted_volume_snapshots.py0000664000567000056710000000501213064447152032434 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.api.openstack import fakes CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class AssistedVolumeSnapshotsJsonTests(test_servers.ServersSampleBase): extension_name = "os-assisted-volume-snapshots" def _get_flags(self): f = super(AssistedVolumeSnapshotsJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.' 'assisted_volume_snapshots.Assisted_volume_snapshots') return f def test_create(self): """Create a volume snapshots.""" self.stub_out('nova.compute.api.API.volume_snapshot_create', fakes.stub_compute_volume_snapshot_create) subs = { 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c', 'snapshot_id': '421752a6-acf6-4b2d-bc7a-119f9148cd8c', 'type': 'qcow2', 'new_file': 'new_file_name' } response = self._do_post("os-assisted-volume-snapshots", "snapshot-create-assisted-req", subs) self._verify_response("snapshot-create-assisted-resp", subs, response, 200) def test_snapshots_delete_assisted(self): self.stub_out('nova.compute.api.API.volume_snapshot_delete', fakes.stub_compute_volume_snapshot_delete) snapshot_id = '100' response = self._do_delete( 'os-assisted-volume-snapshots/%s?delete_info=' '{"volume_id":"521752a6-acf6-4b2d-bc7a-119f9148cd8c"}' % snapshot_id) self.assertEqual(204, response.status_code) self.assertEqual('', response.content) nova-13.1.4/nova/tests/functional/api_sample_tests/test_scheduler_hints.py0000664000567000056710000000332613064447152030315 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class SchedulerHintsJsonTest(api_sample_base.ApiSampleTestBaseV21): extension_name = "os-scheduler-hints" def _get_flags(self): f = super(SchedulerHintsJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( "nova.api.openstack.compute.contrib.scheduler_hints." "Scheduler_hints") return f def test_scheduler_hints_post(self): # Get api sample of scheduler hint post request. 
subs = {'image_id': fake.get_valid_image_id(), 'uuid': str(uuid.uuid4())} response = self._do_post('servers', 'scheduler-hints-post-req', subs) del subs['uuid'] self._verify_response('scheduler-hints-post-resp', subs, response, 202) nova-13.1.4/nova/tests/functional/api_sample_tests/test_services.py0000664000567000056710000001204413064447152026752 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import fixture as utils_fixture from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit.api.openstack.compute import test_services CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-services" microversion = None def _get_flags(self): f = super(ServicesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.services.Services') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.extended_services_delete.' 'Extended_services_delete') f['osapi_compute_extension'].append('nova.api.openstack.compute.' 
'contrib.extended_services.Extended_services') return f def setUp(self): super(ServicesJsonTest, self).setUp() self.api.microversion = self.microversion self.stub_out("nova.db.service_get_all", test_services.fake_db_api_service_get_all) self.stub_out("nova.db.service_get_by_host_and_binary", test_services.fake_service_get_by_host_binary) self.stub_out("nova.db.service_update", test_services.fake_service_update) self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow())) def test_services_list(self): """Return a list of all agent builds.""" response = self._do_get('os-services') subs = {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'status': 'disabled', 'state': 'up'} self._verify_response('services-list-get-resp', subs, response, 200) def test_service_enable(self): """Enable an existing agent build.""" subs = {"host": "host1", 'binary': 'nova-compute'} response = self._do_put('os-services/enable', 'service-enable-put-req', subs) self._verify_response('service-enable-put-resp', subs, response, 200) def test_service_disable(self): """Disable an existing agent build.""" subs = {"host": "host1", 'binary': 'nova-compute'} response = self._do_put('os-services/disable', 'service-disable-put-req', subs) self._verify_response('service-disable-put-resp', subs, response, 200) def test_service_disable_log_reason(self): """Disable an existing service and log the reason.""" subs = {"host": "host1", 'binary': 'nova-compute', 'disabled_reason': 'test2'} response = self._do_put('os-services/disable-log-reason', 'service-disable-log-put-req', subs) self._verify_response('service-disable-log-put-resp', subs, response, 200) def test_service_delete(self): """Delete an existing service.""" response = self._do_delete('os-services/1') self.assertEqual(204, response.status_code) self.assertEqual("", response.content) class ServicesV211JsonTest(ServicesJsonTest): microversion = '2.11' # NOTE(gryf): There is no need to run those tests on v2 API. Only # scenarios for v2_11 will be run. scenarios = [('v2_11', {'api_major_version': 'v2.1'})] def test_services_list(self): """Return a list of all agent builds.""" response = self._do_get('os-services') subs = {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'forced_down': 'false', 'status': 'disabled', 'state': 'up'} self._verify_response('services-list-get-resp', subs, response, 200) def test_force_down(self): """Set forced_down flag""" subs = {"host": 'host1', 'binary': 'nova-compute', 'forced_down': 'true'} response = self._do_put('os-services/force-down', 'service-force-down-put-req', subs) self._verify_response('service-force-down-put-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_multiple_create.py0000664000567000056710000000423613064447152030311 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
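# These tests drive the stored api_samples templates with a substitution
# dict: the keys in ``subs`` (image_id, compute_endpoint, min_count,
# max_count in the class below) are interpolated into the request template
# passed to ``_do_post`` and then matched against the response template by
# ``_verify_response``. A commented sketch of that flow, reusing the names
# from the tests that follow (illustrative only):
#
#     subs = {'image_id': fake.get_valid_image_id(),
#             'compute_endpoint': self._get_compute_endpoint(),
#             'min_count': "2", 'max_count': "3"}
#     response = self._do_post('servers', 'multiple-create-post-req', subs)
#     self._verify_response('multiple-create-post-resp', subs, response, 202)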
from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class MultipleCreateJsonTest(test_servers.ServersSampleBase): extension_name = "os-multiple-create" def _get_flags(self): f = super(MultipleCreateJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.multiple_create.' 'Multiple_create') return f def test_multiple_create(self): subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), 'min_count': "2", 'max_count': "3" } response = self._do_post('servers', 'multiple-create-post-req', subs) self._verify_response('multiple-create-post-resp', subs, response, 202) def test_multiple_create_without_reservation_id(self): subs = { 'image_id': fake.get_valid_image_id(), 'compute_endpoint': self._get_compute_endpoint(), 'min_count': "2", 'max_count': "3" } response = self._do_post('servers', 'multiple-create-no-resv-post-req', subs) self._verify_response('multiple-create-no-resv-post-resp', subs, response, 202) nova-13.1.4/nova/tests/functional/api_sample_tests/test_migrations.py0000664000567000056710000001600613064447152027305 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_config import cfg from nova import context from nova import objects from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') # NOTE(ShaoHe Feng) here I can not use uuidsentinel, it generate a random # UUID. The uuid in doc/api_samples files is fixed. 
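# The fixed UUIDs below are substituted into the stored api_samples
# templates when the v2.23 test verifies its response, as done in
# test_get_migrations_v2_23 further down:
#
#     self._verify_response('migrations-get',
#                           {"instance_1": INSTANCE_UUID_1,
#                            "instance_2": INSTANCE_UUID_2},
#                           response, 200)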
INSTANCE_UUID_1 = "8600d31b-d1a1-4632-b2ff-45c2be1a70ff" INSTANCE_UUID_2 = "9128d044-7b61-403e-b766-7547076ff6c1" class MigrationsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-migrations" def _get_flags(self): f = super(MigrationsSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.migrations.Migrations') return f def _stub_migrations(self, context, filters): fake_migrations = [ { 'id': 1234, 'source_node': 'node1', 'dest_node': 'node2', 'source_compute': 'compute1', 'dest_compute': 'compute2', 'dest_host': '1.2.3.4', 'status': 'Done', 'instance_uuid': 'instance_id_123', 'old_instance_type_id': 1, 'new_instance_type_id': 2, 'migration_type': 'resize', 'hidden': False, 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'deleted_at': None, 'deleted': False }, { 'id': 5678, 'source_node': 'node10', 'dest_node': 'node20', 'source_compute': 'compute10', 'dest_compute': 'compute20', 'dest_host': '5.6.7.8', 'status': 'Done', 'instance_uuid': 'instance_id_456', 'old_instance_type_id': 5, 'new_instance_type_id': 6, 'migration_type': 'resize', 'hidden': False, 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2), 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2), 'deleted_at': None, 'deleted': False } ] return fake_migrations def setUp(self): super(MigrationsSamplesJsonTest, self).setUp() self.stub_out('nova.compute.api.API.get_migrations', self._stub_migrations) def test_get_migrations(self): response = self._do_get('os-migrations') self.assertEqual(200, response.status_code) self._verify_response('migrations-get', {}, response, 200) class MigrationsSamplesJsonTestV2_23(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-migrations" microversion = '2.23' scenarios = [('v2_23', {'api_major_version': 'v2.1'})] fake_migrations = [ # in-progress live-migration. { 'source_node': 'node1', 'dest_node': 'node2', 'source_compute': 'compute1', 'dest_compute': 'compute2', 'dest_host': '1.2.3.4', 'status': 'running', 'instance_uuid': INSTANCE_UUID_1, 'old_instance_type_id': 1, 'new_instance_type_id': 2, 'migration_type': 'live-migration', 'hidden': False, 'created_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'deleted_at': None, 'deleted': False }, # non in-progress live-migration. { 'source_node': 'node1', 'dest_node': 'node2', 'source_compute': 'compute1', 'dest_compute': 'compute2', 'dest_host': '1.2.3.4', 'status': 'error', 'instance_uuid': INSTANCE_UUID_1, 'old_instance_type_id': 1, 'new_instance_type_id': 2, 'migration_type': 'live-migration', 'hidden': False, 'created_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'deleted_at': None, 'deleted': False }, # non in-progress resize. { 'source_node': 'node10', 'dest_node': 'node20', 'source_compute': 'compute10', 'dest_compute': 'compute20', 'dest_host': '5.6.7.8', 'status': 'error', 'instance_uuid': INSTANCE_UUID_2, 'old_instance_type_id': 5, 'new_instance_type_id': 6, 'migration_type': 'resize', 'hidden': False, 'created_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'deleted_at': None, 'deleted': False }, # in-progress resize. 
{ 'source_node': 'node10', 'dest_node': 'node20', 'source_compute': 'compute10', 'dest_compute': 'compute20', 'dest_host': '5.6.7.8', 'status': 'migrating', 'instance_uuid': INSTANCE_UUID_2, 'old_instance_type_id': 5, 'new_instance_type_id': 6, 'migration_type': 'resize', 'hidden': False, 'created_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'deleted_at': None, 'deleted': False } ] def setUp(self): super(MigrationsSamplesJsonTestV2_23, self).setUp() self.api.microversion = self.microversion fake_context = context.RequestContext('fake', 'fake') for mig in self.fake_migrations: mig_obj = objects.Migration(context=fake_context, **mig) mig_obj.create() def test_get_migrations_v2_23(self): response = self._do_get('os-migrations') self.assertEqual(200, response.status_code) self._verify_response( 'migrations-get', {"instance_1": INSTANCE_UUID_1, "instance_2": INSTANCE_UUID_2}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_config_drive.py0000664000567000056710000000561413064447152027572 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.api.openstack import fakes from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ConfigDriveSampleJsonTest(test_servers.ServersSampleBase): extension_name = 'os-config-drive' def _get_flags(self): f = super(ConfigDriveSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.config_drive.Config_drive') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.Extended_ips') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 
'Extended_ips_mac') return f def setUp(self): super(ConfigDriveSampleJsonTest, self).setUp() fakes.stub_out_networking(self) fakes.stub_out_rate_limiting(self.stubs) fake.stub_out_image_service(self) def test_config_drive_show(self): uuid = self._post_server(use_common_server_api_samples=False) response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' # config drive can be a string for True or empty value for False subs['cdrive'] = '.*' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-config-drive-get-resp', subs, response, 200) def test_config_drive_detail(self): self._post_server(use_common_server_api_samples=False) response = self._do_get('servers/detail') subs = {} subs['hostid'] = '[a-f0-9]+' # config drive can be a string for True or empty value for False subs['cdrive'] = '.*' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-config-drive-details-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_fping.py0000664000567000056710000000417513064447152026240 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.api.openstack.compute import test_fping CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FpingSampleJsonTests(test_servers.ServersSampleBase): extension_name = "os-fping" def _get_flags(self): f = super(FpingSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.fping.Fping') return f def setUp(self): super(FpingSampleJsonTests, self).setUp() def fake_check_fping(self): pass self.stub_out("nova.utils.execute", test_fping.execute) self.stub_out("nova.api.openstack.compute.fping." "FpingController.check_fping", fake_check_fping) self.stub_out("nova.api.openstack.compute.legacy_v2.contrib.fping." "FpingController.check_fping", fake_check_fping) def test_get_fping(self): self._post_server() response = self._do_get('os-fping') self._verify_response('fping-get-resp', {}, response, 200) def test_get_fping_details(self): uuid = self._post_server() response = self._do_get('os-fping/%s' % (uuid)) self._verify_response('fping-get-details-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_extended_volumes.py0000664000567000056710000000527413064447152030510 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.api.openstack import fakes CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase): extension_name = "os-extended-volumes" def _get_flags(self): f = super(ExtendedVolumesSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_volumes.' 'Extended_volumes') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.' 'Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.' 'Extended_ips') return f def test_show(self): uuid = self._post_server() self.stub_out('nova.db.block_device_mapping_get_all_by_instance_uuids', fakes.stub_bdm_get_all_by_instance_uuids) response = self._do_get('servers/%s' % uuid) subs = {} subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_detail(self): uuid = self._post_server() self.stub_out('nova.db.block_device_mapping_get_all_by_instance_uuids', fakes.stub_bdm_get_all_by_instance_uuids) response = self._do_get('servers/detail') subs = {} subs['id'] = uuid subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_instance_actions.py0000664000567000056710000001115513064447152030455 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
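# The instance-action tests below do not touch a real database: setUp()
# replaces the relevant ``nova.db`` entry points with in-memory fakes via
# ``stub_out`` and serves canned actions and events from
# nova.tests.unit.fake_server_actions. A commented sketch of one such stub,
# mirroring the setUp() code that follows (illustrative only):
#
#     def fake_server_actions_get(context, uuid):
#         return [copy.deepcopy(value)
#                 for value in six.itervalues(self.actions[uuid])]
#
#     self.stub_out('nova.db.actions_get', fake_server_actions_get)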
import copy from oslo_config import cfg import six from nova.tests.functional.api_sample_tests import api_sample_base from nova.tests.unit import fake_instance from nova.tests.unit import fake_server_actions from nova.tests.unit import utils as test_utils CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): microversion = None ADMIN_API = True extension_name = 'os-instance-actions' def _get_flags(self): f = super(ServerActionsSampleJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 'contrib.instance_actions.Instance_actions') return f def _fake_get(self, context, instance_uuid, expected_attrs=None, want_objects=True): return fake_instance.fake_instance_obj( None, **{'uuid': instance_uuid}) def setUp(self): super(ServerActionsSampleJsonTest, self).setUp() self.api.microversion = self.microversion self.actions = fake_server_actions.FAKE_ACTIONS self.events = fake_server_actions.FAKE_EVENTS self.instance = test_utils.get_test_instance(obj=True) def fake_instance_action_get_by_request_id(context, uuid, request_id): return copy.deepcopy(self.actions[uuid][request_id]) def fake_server_actions_get(context, uuid): return [copy.deepcopy(value) for value in six.itervalues(self.actions[uuid])] def fake_instance_action_events_get(context, action_id): return copy.deepcopy(self.events[action_id]) def fake_instance_get_by_uuid(context, instance_id): return self.instance self.stub_out('nova.db.action_get_by_request_id', fake_instance_action_get_by_request_id) self.stub_out('nova.db.actions_get', fake_server_actions_get) self.stub_out('nova.db.action_events_get', fake_instance_action_events_get) self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get_by_uuid) self.stub_out('nova.compute.api.API.get', self._fake_get) def test_instance_action_get(self): fake_uuid = fake_server_actions.FAKE_UUID fake_request_id = fake_server_actions.FAKE_REQUEST_ID1 fake_action = self.actions[fake_uuid][fake_request_id] response = self._do_get('servers/%s/os-instance-actions/%s' % (fake_uuid, fake_request_id)) subs = {} subs['action'] = '(reboot)|(resize)' subs['instance_uuid'] = str(fake_uuid) subs['integer_id'] = '[0-9]+' subs['request_id'] = str(fake_action['request_id']) subs['start_time'] = str(fake_action['start_time']) subs['result'] = '(Success)|(Error)' subs['event'] = '(schedule)|(compute_create)' self._verify_response('instance-action-get-resp', subs, response, 200) def test_instance_actions_list(self): fake_uuid = fake_server_actions.FAKE_UUID response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid)) subs = {} subs['action'] = '(reboot)|(resize)' subs['integer_id'] = '[0-9]+' subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12}') self._verify_response('instance-actions-list-resp', subs, response, 200) class ServerActionsV221SampleJsonTest(ServerActionsSampleJsonTest): microversion = '2.21' scenarios = [('v2_21', {'api_major_version': 'v2.1'})] def _fake_get(self, context, instance_uuid, expected_attrs=None, want_objects=True): self.assertEqual('yes', context.read_deleted) return fake_instance.fake_instance_obj( None, **{'uuid': instance_uuid}) nova-13.1.4/nova/tests/functional/api_sample_tests/test_consoles.py0000664000567000056710000000424313064447152026756 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. 
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.console import manager as console_manager # noqa - only for cfg from nova.tests.functional.api_sample_tests import test_servers class ConsolesSamplesJsonTest(test_servers.ServersSampleBase): sample_dir = "consoles" def setUp(self): super(ConsolesSamplesJsonTest, self).setUp() self.flags(console_public_hostname='fake') self.flags(console_host='fake') self.flags(console_driver='nova.console.fake.FakeConsoleProxy') self.console = self.start_service('console', host='fake') def _create_consoles(self, server_uuid): response = self._do_post('servers/%s/consoles' % server_uuid, 'consoles-create-req', {}) self.assertEqual(response.status_code, 200) def test_create_consoles(self): uuid = self._post_server() self._create_consoles(uuid) def test_list_consoles(self): uuid = self._post_server() self._create_consoles(uuid) response = self._do_get('servers/%s/consoles' % uuid) self._verify_response('consoles-list-get-resp', {}, response, 200) def test_console_get(self): uuid = self._post_server() self._create_consoles(uuid) response = self._do_get('servers/%s/consoles/1' % uuid) self._verify_response('consoles-get-resp', {}, response, 200) def test_console_delete(self): uuid = self._post_server() self._create_consoles(uuid) response = self._do_delete('servers/%s/consoles/1' % uuid) self.assertEqual(202, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_floating_ips_bulk.py0000664000567000056710000000711113064447152030621 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova import context from nova.tests.functional.api_sample_tests import api_sample_base CONF = cfg.CONF CONF.import_opt('default_floating_pool', 'nova.network.floating_ips') CONF.import_opt('public_interface', 'nova.network.linux_net') CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True extension_name = "os-floating-ips-bulk" def _get_flags(self): f = super(FloatingIpsBulkTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append('nova.api.openstack.compute.' 
'contrib.floating_ips_bulk.Floating_ips_bulk') return f def setUp(self): super(FloatingIpsBulkTest, self).setUp() pool = CONF.default_floating_pool interface = CONF.public_interface self.ip_pool = [ { 'address': "10.10.10.1", 'pool': pool, 'interface': interface, 'host': None }, { 'address': "10.10.10.2", 'pool': pool, 'interface': interface, 'host': None }, { 'address': "10.10.10.3", 'pool': pool, 'interface': interface, 'host': "testHost" }, ] self.compute.db.floating_ip_bulk_create( context.get_admin_context(), self.ip_pool) self.addCleanup(self.compute.db.floating_ip_bulk_destroy, context.get_admin_context(), self.ip_pool) def test_floating_ips_bulk_list(self): response = self._do_get('os-floating-ips-bulk') self._verify_response('floating-ips-bulk-list-resp', {}, response, 200) def test_floating_ips_bulk_list_by_host(self): response = self._do_get('os-floating-ips-bulk/testHost') self._verify_response('floating-ips-bulk-list-by-host-resp', {}, response, 200) def test_floating_ips_bulk_create(self): response = self._do_post('os-floating-ips-bulk', 'floating-ips-bulk-create-req', {"ip_range": "192.168.1.0/24", "pool": CONF.default_floating_pool, "interface": CONF.public_interface}) self._verify_response('floating-ips-bulk-create-resp', {}, response, 200) def test_floating_ips_bulk_delete(self): response = self._do_put('os-floating-ips-bulk/delete', 'floating-ips-bulk-delete-req', {"ip_range": "192.168.1.0/24"}) self._verify_response('floating-ips-bulk-delete-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_versions.py0000664000567000056710000000354413064447140027001 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import api_version_request as avr from nova.tests.functional.api_sample_tests import api_sample_base class VersionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): sample_dir = 'versions' # NOTE(gmann): Setting empty scenario for 'version' API testing # as those does not send request on particular endpoint and running # its tests alone is enough. 
scenarios = [] max_api_version = avr.max_api_version().get_string() def test_versions_get(self): response = self._do_get('', strip_version=True) self._verify_response('versions-get-resp', {'max_api_version': self.max_api_version}, response, 200, update_links=False) def test_versions_get_v2(self): response = self._do_get('/v2', strip_version=True) self._verify_response('v2-version-get-resp', {}, response, 200, update_links=False) def test_versions_get_v21(self): response = self._do_get('/v2.1', strip_version=True) self._verify_response('v21-version-get-resp', {'max_api_version': self.max_api_version}, response, 200, update_links=False) nova-13.1.4/nova/tests/functional/api_sample_tests/test_console_auth_tokens.py0000664000567000056710000000471413064447152031202 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_config import cfg from oslo_serialization import jsonutils from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "os-console-auth-tokens" extra_extensions_to_load = ["os-remote-consoles", "os-access-ips"] def _get_flags(self): f = super(ConsoleAuthTokensSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.consoles.Consoles') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.console_auth_tokens.' 'Console_auth_tokens') return f def _get_console_url(self, data): return jsonutils.loads(data)["console"]["url"] def _get_console_token(self, uuid): response = self._do_post('servers/%s/action' % uuid, 'get-rdp-console-post-req', {'action': 'os-getRDPConsole'}) url = self._get_console_url(response.content) return re.match('.+?token=([^&]+)', url).groups()[0] def test_get_console_connect_info(self): self.flags(enabled=True, group='rdp') uuid = self._post_server() token = self._get_console_token(uuid) response = self._do_get('os-console-auth-tokens/%s' % token) subs = {} subs["uuid"] = uuid subs["host"] = r"[\w\.\-]+" subs["port"] = "[0-9]+" subs["internal_access_path"] = ".*" self._verify_response('get-console-connect-info-get-resp', subs, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_servers_ips.py0000664000567000056710000000265513064447152027502 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class ServersIpsJsonTest(test_servers.ServersSampleBase): sample_dir = 'server-ips' def test_get(self): # Test getting a server's IP information. uuid = self._post_server() response = self._do_get('servers/%s/ips' % uuid) self._verify_response('server-ips-resp', {}, response, 200) def test_get_by_network(self): # Test getting a server's IP information by network id. uuid = self._post_server() response = self._do_get('servers/%s/ips/private' % uuid) self._verify_response('server-ips-network-resp', {}, response, 200) nova-13.1.4/nova/tests/functional/api_sample_tests/test_create_backup.py0000664000567000056710000000362113064447152027720 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import mock from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.image import fake CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class CreateBackupSamplesJsonTest(test_servers.ServersSampleBase): extension_name = "os-create-backup" def _get_flags(self): f = super(CreateBackupSamplesJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.admin_actions.Admin_actions') return f def setUp(self): """setUp Method for PauseServer api samples extension This method creates the server that will be used in each tests """ super(CreateBackupSamplesJsonTest, self).setUp() self.uuid = self._post_server() @mock.patch.object(fake._FakeImageService, 'detail', return_value=[]) def test_post_backup_server(self, mock_method): # Get api samples to backup server request. response = self._do_post('servers/%s/action' % self.uuid, 'create-backup-req', {}) self.assertEqual(202, response.status_code) nova-13.1.4/nova/tests/functional/api_sample_tests/test_evacuate.py0000664000567000056710000001435613064447152026734 0ustar jenkinsjenkins00000000000000# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from nova.tests.functional.api_sample_tests import test_servers CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.legacy_v2.extensions') class EvacuateJsonTest(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "os-evacuate" def _get_flags(self): f = super(EvacuateJsonTest, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.evacuate.Evacuate') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_evacuate_find_host.' 'Extended_evacuate_find_host') return f def _test_evacuate(self, req_subs, server_req, server_resp, expected_resp_code): self.uuid = self._post_server() def fake_service_is_up(self, service): """Simulate validation of instance host is down.""" return False def fake_service_get_by_compute_host(self, context, host): """Simulate that given host is a valid host.""" return { 'host_name': host, 'service': 'compute', 'zone': 'nova' } def fake_check_instance_exists(self, context, instance): """Simulate validation of instance does not exist.""" return False self.stub_out( 'nova.servicegroup.api.API.service_is_up', fake_service_is_up) self.stub_out( 'nova.compute.api.HostAPI.service_get_by_compute_host', fake_service_get_by_compute_host) self.stub_out( 'nova.compute.manager.ComputeManager._check_instance_exists', fake_check_instance_exists) response = self._do_post('servers/%s/action' % self.uuid, server_req, req_subs) if server_resp: self._verify_response(server_resp, {}, response, expected_resp_code) else: # NOTE(gibi): no server_resp means we expect empty body as # a response self.assertEqual(expected_resp_code, response.status_code) self.assertEqual('', response.content) @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') def test_server_evacuate(self, rebuild_mock): # Note (wingwj): The host can't be the same one req_subs = { 'host': 'testHost', "adminPass": "MySecretPass", "onSharedStorage": 'False' } self._test_evacuate(req_subs, 'server-evacuate-req', 'server-evacuate-resp', 200) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=False, preserve_ephemeral=mock.ANY, host='testHost', request_spec=mock.ANY) @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') def test_server_evacuate_find_host(self, rebuild_mock): req_subs = { "adminPass": "MySecretPass", "onSharedStorage": 'False' } self._test_evacuate(req_subs, 'server-evacuate-find-host-req', 'server-evacuate-find-host-resp', 200) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=False, preserve_ephemeral=mock.ANY, host=None, request_spec=mock.ANY) class EvacuateJsonTestV214(EvacuateJsonTest): 
microversion = '2.14' scenarios = [('v2_14', {'api_major_version': 'v2.1'})] @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') def test_server_evacuate(self, rebuild_mock): # Note (wingwj): The host can't be the same one req_subs = { 'host': 'testHost', "adminPass": "MySecretPass", } self._test_evacuate(req_subs, 'server-evacuate-req', server_resp=None, expected_resp_code=200) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=None, preserve_ephemeral=mock.ANY, host='testHost', request_spec=mock.ANY) @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') def test_server_evacuate_find_host(self, rebuild_mock): req_subs = { "adminPass": "MySecretPass", } self._test_evacuate(req_subs, 'server-evacuate-find-host-req', server_resp=None, expected_resp_code=200) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=None, preserve_ephemeral=mock.ANY, host=None, request_spec=mock.ANY) nova-13.1.4/nova/tests/functional/api_sample_tests/test_personality.py0000664000567000056710000000306613064447152027504 0ustar jenkinsjenkins00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.tests.functional.api_sample_tests import test_servers from nova.tests.unit.image import fake class PersonalitySampleJsonTest(test_servers.ServersSampleBase): extension_name = 'os-personality' def test_servers_post(self): self._post_server(use_common_server_api_samples=False) def test_servers_rebuild(self): subs = { 'image_id': fake.get_valid_image_id(), 'host': self._get_host(), 'glance_host': self._get_glance_host(), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': '80fe::' } uuid = self._post_server(use_common_server_api_samples=False) response = self._do_post('servers/%s/action' % uuid, 'server-action-rebuild-req', subs) subs['hostid'] = '[a-f0-9]+' subs['id'] = uuid self._verify_response('server-action-rebuild-resp', subs, response, 202) nova-13.1.4/nova/tests/functional/api_paste_fixture.py0000664000567000056710000000470213064447152024251 0ustar jenkinsjenkins00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
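# The fixtures in this module copy etc/nova/api-paste.ini into a temporary
# file and rewrite the "/v2" route, letting functional tests choose which
# compute API pipeline answers legacy /v2 requests without editing the real
# paste config. A minimal usage sketch (the test class name is hypothetical;
# it only illustrates the standard useFixture() pattern these fixtures are
# built for):
#
#     class FakeLegacyV2CompatTest(test.TestCase):
#         def setUp(self):
#             super(FakeLegacyV2CompatTest, self).setUp()
#             # Point /v2 at the legacy v2 application for this test only.
#             self.useFixture(api_paste_fixture.ApiPasteLegacyV2Fixture())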
import os import fixtures from oslo_config import cfg from nova import paths CONF = cfg.CONF class ApiPasteV21Fixture(fixtures.Fixture): def _replace_line(self, target_file, line): # TODO(johnthetubaguy) should really point the tests at /v2.1 target_file.write(line.replace( "/v2: openstack_compute_api_v21_legacy_v2_compatible", "/v2: openstack_compute_api_v21")) def setUp(self): super(ApiPasteV21Fixture, self).setUp() CONF.set_default('api_paste_config', paths.state_path_def('etc/nova/api-paste.ini')) tmp_api_paste_dir = self.useFixture(fixtures.TempDir()) tmp_api_paste_file_name = os.path.join(tmp_api_paste_dir.path, 'fake_api_paste.ini') with open(CONF.api_paste_config, 'r') as orig_api_paste: with open(tmp_api_paste_file_name, 'w') as tmp_file: for line in orig_api_paste: self._replace_line(tmp_file, line) CONF.set_override('api_paste_config', tmp_api_paste_file_name) class ApiPasteLegacyV2Fixture(ApiPasteV21Fixture): def _replace_line(self, target_file, line): # NOTE(johnthetubaguy) this is hack so we test the legacy_v2 code # even though its disable by default in api-paste.ini line = line.replace( "/v2: openstack_compute_api_v21_legacy_v2_compatible", "/v2: openstack_compute_api_legacy_v2") target_file.write(line) class ApiPasteNoProjectId(ApiPasteV21Fixture): def _replace_line(self, target_file, line): line = line.replace( "paste.filter_factory = nova.api.openstack.auth:" "NoAuthMiddleware.factory", "paste.filter_factory = nova.api.openstack.auth:" "NoAuthMiddlewareV2_18.factory") target_file.write(line) nova-13.1.4/nova/tests/functional/notification_sample_tests/0000775000567000056710000000000013064447471025436 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/notification_sample_tests/notification_sample_base.py0000664000567000056710000000735013064447152033032 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_serialization import jsonutils from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import fake_notifier class NotificationSampleTestBase(test.TestCase): """Base class for notification sample testing. To add tests for a versioned notification you have to store a sample file under doc/notification_sample directory. In the test method in the subclass trigger a change in the system that expected to generate the notification then use the _verify_notification function to assert if the stored sample matches with the generated one. If the notification has different payload content depending on the state change you triggered then the replacements parameter of the _verify_notification function can be used to override values coming from the sample file. Check nova.functional.notification_sample_tests.test_service_update as an example. 
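    A condensed sketch of that pattern (the sample file name and the
    overridden payload field below are illustrative, borrowed from the
    service-update tests; any trigger/assert pair of this shape follows the
    same flow):

        def test_my_state_change(self):
            # ... trigger the state change through self.admin_api here ...
            self._verify_notification('service-update',
                                      replacements={'disabled': True})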
""" def setUp(self): super(NotificationSampleTestBase, self).setUp() api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.api = api_fixture.api self.admin_api = api_fixture.admin_api fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) def _get_notification_sample(self, sample): sample_dir = os.path.dirname(os.path.abspath(__file__)) sample_dir = os.path.normpath(os.path.join( sample_dir, "../../../../doc/notification_samples")) return sample_dir + '/' + sample + '.json' def _apply_replacements(self, replacements, sample_obj): replacements = replacements or {} for key, value in replacements.items(): obj = sample_obj['payload'] for sub_key in key.split('.')[:-1]: obj = obj['nova_object.data'][sub_key] obj['nova_object.data'][key.split('.')[-1]] = value def _verify_notification(self, sample_file_name, replacements=None): """Assert if the generated notification matches with the stored sample :param sample_file_name: The name of the sample file to match relative to doc/notification_samples :param replacements: A dict of key value pairs that is used to update the payload field of the sample data before it is matched against the generated notification. The 'x.y':'new-value' key-value pair selects the ["payload"]["nova_object.data"]["x"] ["nova_object.data"]["y"] value from the sample data and overrides it with 'new-value'. """ self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS)) notification = fake_notifier.VERSIONED_NOTIFICATIONS[0] with open(self._get_notification_sample(sample_file_name)) as sample: sample_data = sample.read() sample_obj = jsonutils.loads(sample_data) self._apply_replacements(replacements, sample_obj) self.assertJsonEqual(sample_obj, notification) nova-13.1.4/nova/tests/functional/notification_sample_tests/__init__.py0000664000567000056710000000000013064447140027526 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/notification_sample_tests/test_service_update.py0000664000567000056710000000534213064447140032046 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import fixture as utils_fixture from nova.tests.functional.notification_sample_tests \ import notification_sample_base from nova.tests.unit.api.openstack.compute import test_services class TestServiceUpdateNotificationSample( notification_sample_base.NotificationSampleTestBase): def setUp(self): super(TestServiceUpdateNotificationSample, self).setUp() self.stub_out("nova.db.service_get_by_host_and_binary", test_services.fake_service_get_by_host_binary) self.stub_out("nova.db.service_update", test_services.fake_service_update) self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow())) def test_service_enable(self): body = {'host': 'host1', 'binary': 'nova-compute'} self.admin_api.api_put('os-services/enable', body) self._verify_notification('service-update') def test_service_disabled(self): body = {'host': 'host1', 'binary': 'nova-compute'} self.admin_api.api_put('os-services/disable', body) self._verify_notification('service-update', replacements={'disabled': True}) def test_service_disabled_log_reason(self): body = {'host': 'host1', 'binary': 'nova-compute', 'disabled_reason': 'test2'} self.admin_api.api_put('os-services/disable-log-reason', body) self._verify_notification('service-update', replacements={'disabled': True, 'disabled_reason': 'test2'}) def test_service_force_down(self): body = {'host': 'host1', 'binary': 'nova-compute', 'forced_down': True} self.admin_api.microversion = '2.12' self.admin_api.api_put('os-services/force-down', body) self._verify_notification('service-update', replacements={'forced_down': True, 'disabled': True, 'disabled_reason': 'test2'}) nova-13.1.4/nova/tests/functional/db/0000775000567000056710000000000013064447471020552 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/db/test_flavor_model.py0000664000567000056710000000574213064447140024635 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
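# These tests guard against schema drift: each one pulls the column list from
# a new API-database flavor table (api_models) and from its legacy
# main-database counterpart (models.InstanceTypes and friends), strips the
# columns that legitimately differ (the soft-delete pair 'deleted' and
# 'deleted_at', plus the renamed 'flavor_id' / 'instance_type_id' foreign
# keys), and asserts that the remaining column sets are identical.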
from nova.db.sqlalchemy import api_models from nova.db.sqlalchemy import models from nova import test class FlavorTablesCompareTestCase(test.NoDBTestCase): def _get_columns_list(self, model): columns_list = [m.key for m in model.__table__.columns] return columns_list def _check_column_list(self, columns_new, columns_old): columns_old.remove('deleted_at') columns_old.remove('deleted') intersect = set(columns_new).intersection(set(columns_old)) if intersect != set(columns_new) or intersect != set(columns_old): return False return True def test_tables_flavors_instance_types(self): flavors = api_models.Flavors() instance_types = models.InstanceTypes() columns_flavors = self._get_columns_list(flavors) columns_instance_types = self._get_columns_list(instance_types) self.assertTrue(self._check_column_list(columns_flavors, columns_instance_types)) def test_tables_flavor_instance_type_extra_specs(self): flavor_extra_specs = api_models.FlavorExtraSpecs() instance_type_extra_specs = models.InstanceTypeExtraSpecs() columns_flavor_extra_specs = self._get_columns_list(flavor_extra_specs) columns_instance_type_extra_specs = self._get_columns_list( instance_type_extra_specs) columns_flavor_extra_specs.remove('flavor_id') columns_instance_type_extra_specs.remove('instance_type_id') self.assertTrue(self._check_column_list( columns_flavor_extra_specs, columns_instance_type_extra_specs)) def test_tables_flavor_instance_type_projects(self): flavor_projects = api_models.FlavorProjects() instance_types_projects = models.InstanceTypeProjects() columns_flavor_projects = self._get_columns_list(flavor_projects) columns_instance_type_projects = self._get_columns_list( instance_types_projects) columns_flavor_projects.remove('flavor_id') columns_instance_type_projects.remove('instance_type_id') self.assertTrue(self._check_column_list( columns_flavor_projects, columns_instance_type_projects)) nova-13.1.4/nova/tests/functional/db/test_connection_switch.py0000664000567000056710000000445013064447152025702 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
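# This test exercises per-context database switching: a CellMapping carries
# its own database_connection URL, and wrapping DB access in
# context.target_cell(ctxt, mapping) redirects that context to the mapped
# cell database, so the instance created inside the block is readable there
# but raises InstanceNotFound when looked up through the 'main' database.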
import os from oslo_utils import uuidutils from nova import context from nova import exception from nova import objects from nova import test from nova.tests import fixtures class ConnectionSwitchTestCase(test.NoDBTestCase): test_filename = 'foo.db' def setUp(self): super(ConnectionSwitchTestCase, self).setUp() self.addCleanup(self.cleanup) def cleanup(self): try: os.remove(self.test_filename) except OSError: pass def test_connection_switch(self): # Use a file-based sqlite database so data will persist across new # connections fake_conn = 'sqlite:///' + self.test_filename # The 'main' database connection will stay open, so in-memory is fine self.useFixture(fixtures.Database(database='main')) self.useFixture(fixtures.Database(connection=fake_conn)) # Make a request context with a cell mapping mapping = objects.CellMapping(database_connection=fake_conn) # In the tests, the admin context is required in order to read # an Instance back after write, for some reason ctxt = context.get_admin_context() # Create an instance in the cell database uuid = uuidutils.generate_uuid() with context.target_cell(ctxt, mapping): instance = objects.Instance(context=ctxt, uuid=uuid) instance.create() # Verify the instance is found in the cell database inst = objects.Instance.get_by_uuid(ctxt, uuid) self.assertEqual(uuid, inst.uuid) # Verify the instance isn't found in the main database self.assertRaises(exception.InstanceNotFound, objects.Instance.get_by_uuid, ctxt, uuid) nova-13.1.4/nova/tests/functional/db/test_archive.py0000664000567000056710000001571013064447140023601 0ustar jenkinsjenkins00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.dialects import sqlite from nova import context from nova import db from nova.db.sqlalchemy import api as sqlalchemy_api from nova.tests.functional import test_servers from nova.tests.unit import fake_network class TestDatabaseArchive(test_servers.ServersTestBase): """Tests DB API for archiving (soft) deleted records""" def setUp(self): super(TestDatabaseArchive, self).setUp() # TODO(mriedem): pull this out so we can re-use it in # test_archive_deleted_rows_fk_constraint # SQLite doesn't enforce foreign key constraints without a pragma. engine = sqlalchemy_api.get_engine() dialect = engine.url.get_dialect() if dialect == sqlite.dialect: # We're seeing issues with foreign key support in SQLite 3.6.20 # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 # It works fine in SQLite 3.7. # So return early to skip this test if running SQLite < 3.7 import sqlite3 tup = sqlite3.sqlite_version_info if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): self.skipTest( 'sqlite version too old for reliable SQLA foreign_keys') engine.connect().execute("PRAGMA foreign_keys = ON") def _create_server(self): """Creates a minimal test server via the compute API Ensures the server is created and can be retrieved from the compute API and waits for it to be ACTIVE. 
:returns: created server (dict) """ # TODO(mriedem): We should pull this up into the parent class so we # don't have so much copy/paste in these functional tests. fake_network.set_stub_network_methods(self) # Create a server server = self._build_minimal_create_server_request() created_server = self.api.post_server({'server': server}) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Check it's there found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) found_server = self._wait_for_state_change(found_server, 'BUILD') # It should be available... self.assertEqual('ACTIVE', found_server['status']) return found_server def test_archive_deleted_rows(self): # Boots a server, deletes it, and then tries to archive it. server = self._create_server() server_id = server['id'] # Assert that there are instance_actions. instance_actions are # interesting since we don't soft delete them but they have a foreign # key back to the instances table. actions = self.api.get_instance_actions(server_id) self.assertTrue(len(actions), 'No instance actions for server: %s' % server_id) self._delete_server(server_id) # Verify we have the soft deleted instance in the database. admin_context = context.get_admin_context(read_deleted='yes') # This will raise InstanceNotFound if it's not found. instance = db.instance_get_by_uuid(admin_context, server_id) # Make sure it's soft deleted. self.assertNotEqual(0, instance.deleted) # Verify we have some system_metadata since we'll check that later. self.assertTrue(len(instance.system_metadata), 'No system_metadata for instance: %s' % server_id) # Now try and archive the soft deleted records. results = db.archive_deleted_rows(max_rows=100) # verify system_metadata was dropped self.assertIn('instance_system_metadata', results) self.assertEqual(len(instance.system_metadata), results['instance_system_metadata']) # Verify that instances rows are dropped self.assertIn('instances', results) # Verify that instance_actions and actions_event are dropped # by the archive self.assertIn('instance_actions', results) self.assertIn('instance_actions_events', results) def test_archive_deleted_rows_with_undeleted_residue(self): # Boots a server, deletes it, and then tries to archive it. server = self._create_server() server_id = server['id'] # Assert that there are instance_actions. instance_actions are # interesting since we don't soft delete them but they have a foreign # key back to the instances table. actions = self.api.get_instance_actions(server_id) self.assertTrue(len(actions), 'No instance actions for server: %s' % server_id) self._delete_server(server_id) # Verify we have the soft deleted instance in the database. admin_context = context.get_admin_context(read_deleted='yes') # This will raise InstanceNotFound if it's not found. instance = db.instance_get_by_uuid(admin_context, server_id) # Make sure it's soft deleted. self.assertNotEqual(0, instance.deleted) # Undelete the instance_extra record to make sure we delete it anyway extra = db.instance_extra_get_by_instance_uuid(admin_context, instance.uuid) self.assertNotEqual(0, extra.deleted) db.instance_extra_update_by_uuid(admin_context, instance.uuid, {'deleted': 0}) extra = db.instance_extra_get_by_instance_uuid(admin_context, instance.uuid) self.assertEqual(0, extra.deleted) # Verify we have some system_metadata since we'll check that later. 
self.assertTrue(len(instance.system_metadata), 'No system_metadata for instance: %s' % server_id) # Now try and archive the soft deleted records. results = db.archive_deleted_rows(max_rows=100) # verify system_metadata was dropped self.assertIn('instance_system_metadata', results) self.assertEqual(len(instance.system_metadata), results['instance_system_metadata']) # Verify that instances rows are dropped self.assertIn('instances', results) # Verify that instance_actions and actions_event are dropped # by the archive self.assertIn('instance_actions', results) self.assertIn('instance_actions_events', results) nova-13.1.4/nova/tests/functional/db/__init__.py0000664000567000056710000000000013064447140022642 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/db/test_host_mapping.py0000664000567000056710000001052413064447152024651 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from nova import context from nova import exception from nova.objects import cell_mapping from nova.objects import host_mapping from nova import test from nova.tests import fixtures sample_mapping = {'host': 'fake-host', 'cell_mapping': None} sample_cell_mapping = {'id': 1, 'uuid': '', 'name': 'fake-cell', 'transport_url': 'rabbit:///', 'database_connection': 'mysql:///'} def create_cell_mapping(**kwargs): args = sample_cell_mapping.copy() if 'uuid' not in kwargs: args['uuid'] = uuidutils.generate_uuid() args.update(kwargs) ctxt = context.RequestContext('fake-user', 'fake-project') return cell_mapping.CellMapping._create_in_db(ctxt, args) def create_mapping(**kwargs): args = sample_mapping.copy() args.update(kwargs) if args["cell_mapping"] is None: args["cell_mapping"] = create_cell_mapping() args["cell_id"] = args.pop("cell_mapping", {}).get("id") ctxt = context.RequestContext('fake-user', 'fake-project') return host_mapping.HostMapping._create_in_db(ctxt, args) def create_mapping_obj(context, **kwargs): mapping = create_mapping(**kwargs) return host_mapping.HostMapping._from_db_object( context, host_mapping.HostMapping(), mapping) class HostMappingTestCase(test.NoDBTestCase): def setUp(self): super(HostMappingTestCase, self).setUp() self.useFixture(fixtures.Database(database='api')) self.context = context.RequestContext('fake-user', 'fake-project') self.mapping_obj = host_mapping.HostMapping() self.cell_mapping_obj = cell_mapping.CellMapping() def _compare_cell_obj_to_mapping(self, obj, mapping): for key in [key for key in self.cell_mapping_obj.fields.keys() if key not in ("created_at", "updated_at")]: self.assertEqual(getattr(obj, key), mapping[key]) def test_get_by_host(self): mapping = create_mapping() db_mapping = self.mapping_obj._get_by_host_from_db( self.context, mapping['host']) for key in self.mapping_obj.fields.keys(): if key == "cell_mapping": key = "cell_id" self.assertEqual(db_mapping[key], mapping[key]) def test_get_by_host_not_found(self): self.assertRaises(exception.HostMappingNotFound, self.mapping_obj._get_by_host_from_db, 
self.context, 'fake-host2') def test_save_in_db(self): mapping = create_mapping() new_cell = create_cell_mapping(id=42) self.mapping_obj._save_in_db(self.context, mapping, {'cell_id': new_cell["id"]}) db_mapping = self.mapping_obj._get_by_host_from_db( self.context, mapping['host']) self.assertNotEqual(db_mapping['cell_id'], mapping['cell_id']) for key in [key for key in self.mapping_obj.fields.keys() if key not in ('updated_at', 'cell_id')]: if key == "cell_mapping": continue self.assertEqual(db_mapping[key], mapping[key]) def test_destroy_in_db(self): mapping = create_mapping() self.mapping_obj._get_by_host_from_db(self.context, mapping['host']) self.mapping_obj._destroy_in_db(self.context, mapping['host']) self.assertRaises(exception.HostMappingNotFound, self.mapping_obj._get_by_host_from_db, self.context, mapping['host']) def test_load_cell_mapping(self): cell = create_cell_mapping(id=42) mapping_obj = create_mapping_obj(self.context, cell_mapping=cell) cell_map_obj = mapping_obj.cell_mapping self._compare_cell_obj_to_mapping(cell_map_obj, cell) nova-13.1.4/nova/tests/functional/db/test_resource_provider.py0000664000567000056710000001004413064447152025717 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import context from nova import exception from nova import objects from nova.objects import fields from nova import test from nova.tests import fixtures from nova.tests import uuidsentinel DISK_INVENTORY = dict( total=200, reserved=10, min_unit=2, max_unit=5, step_size=1, allocation_ratio=1.0 ) class ResourceProviderTestCase(test.NoDBTestCase): """Test resource-provider objects' lifecycles.""" def setUp(self): super(ResourceProviderTestCase, self).setUp() self.useFixture(fixtures.Database()) self.context = context.RequestContext('fake-user', 'fake-project') def test_create_resource_provider_requires_uuid(self): resource_provider = objects.ResourceProvider( context = self.context) self.assertRaises(exception.ObjectActionError, resource_provider.create) def test_create_resource_provider(self): created_resource_provider = objects.ResourceProvider( context=self.context, uuid=uuidsentinel.fake_resource_provider ) created_resource_provider.create() self.assertIsInstance(created_resource_provider.id, int) retrieved_resource_provider = objects.ResourceProvider.get_by_uuid( self.context, uuidsentinel.fake_resource_provider ) self.assertEqual(retrieved_resource_provider.id, created_resource_provider.id) def test_create_inventory_with_uncreated_provider(self): resource_provider = objects.ResourceProvider( context=self.context, uuid=uuidsentinel.inventory_resource_provider ) resource_class = fields.ResourceClass.DISK_GB disk_inventory = objects.Inventory( context=self.context, resource_provider=resource_provider, resource_class=resource_class, **DISK_INVENTORY ) self.assertRaises(exception.ObjectActionError, disk_inventory.create) def test_create_and_update_inventory(self): resource_provider = objects.ResourceProvider( context=self.context, 
uuid=uuidsentinel.inventory_resource_provider ) resource_provider.create() resource_class = fields.ResourceClass.DISK_GB disk_inventory = objects.Inventory( context=self.context, resource_provider=resource_provider, resource_class=resource_class, **DISK_INVENTORY ) disk_inventory.create() self.assertEqual(resource_class, disk_inventory.resource_class) self.assertEqual(resource_provider, disk_inventory.resource_provider) self.assertEqual(DISK_INVENTORY['allocation_ratio'], disk_inventory.allocation_ratio) self.assertEqual(DISK_INVENTORY['total'], disk_inventory.total) disk_inventory.total = 32 disk_inventory.save() inventories = objects.InventoryList.get_all_by_resource_provider_uuid( self.context, resource_provider.uuid) self.assertEqual(1, len(inventories)) self.assertEqual(32, inventories[0].total) inventories[0].total = 33 inventories[0].save() reloaded_inventories = ( objects.InventoryList.get_all_by_resource_provider_uuid( self.context, resource_provider.uuid)) self.assertEqual(33, reloaded_inventories[0].total) nova-13.1.4/nova/tests/functional/db/test_build_request.py0000664000567000056710000000670713064447152025040 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import uuidutils from nova import context from nova import exception from nova import objects from nova.objects import build_request from nova import test from nova.tests import fixtures from nova.tests.unit import fake_build_request from nova.tests.unit import fake_request_spec class BuildRequestTestCase(test.NoDBTestCase): def setUp(self): super(BuildRequestTestCase, self).setUp() # NOTE: This means that we're using a database for this test suite # despite inheriting from NoDBTestCase self.useFixture(fixtures.Database(database='api')) self.context = context.RequestContext('fake-user', 'fake-project') self.build_req_obj = build_request.BuildRequest() self.instance_uuid = uuidutils.generate_uuid() self.project_id = 'fake-project' def _create_req(self): req_spec = fake_request_spec.fake_spec_obj(remove_id=True) req_spec.instance_uuid = self.instance_uuid req_spec.create() args = fake_build_request.fake_db_req( request_spec_id=req_spec.id) args.pop('id', None) args.pop('request_spec', None) args['project_id'] = self.project_id return build_request.BuildRequest._from_db_object(self.context, self.build_req_obj, self.build_req_obj._create_in_db(self.context, args)) def test_get_by_instance_uuid_not_found(self): self.assertRaises(exception.BuildRequestNotFound, self.build_req_obj._get_by_instance_uuid_from_db, self.context, self.instance_uuid) def test_get_by_uuid(self): req = self._create_req() db_req = self.build_req_obj._get_by_instance_uuid_from_db(self.context, self.instance_uuid) for key in self.build_req_obj.fields.keys(): expected = getattr(req, key) db_value = db_req[key] if key == 'request_spec': # NOTE: The object and db value can't be compared directly as # objects, so serialize them to a comparable form. 
db_value = jsonutils.dumps(objects.RequestSpec._from_db_object( self.context, objects.RequestSpec(), db_value).obj_to_primitive()) expected = jsonutils.dumps(expected.obj_to_primitive()) elif key in build_request.OBJECT_FIELDS: expected = jsonutils.dumps(expected.obj_to_primitive()) elif key in build_request.JSON_FIELDS: expected = jsonutils.dumps(expected) elif key in build_request.IP_FIELDS: expected = str(expected) elif key in ['created_at', 'updated_at']: # Objects store tz aware datetimes but the db does not. expected = expected.replace(tzinfo=None) self.assertEqual(expected, db_value) nova-13.1.4/nova/tests/functional/db/test_instance_mapping.py0000664000567000056710000001011613064447152025475 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from nova import context from nova import exception from nova.objects import instance_mapping from nova import test from nova.tests import fixtures sample_mapping = {'instance_uuid': '', 'cell_id': 3, 'project_id': 'fake-project'} def create_mapping(**kwargs): args = sample_mapping.copy() if 'instance_uuid' not in kwargs: args['instance_uuid'] = uuidutils.generate_uuid() args.update(kwargs) ctxt = context.RequestContext('fake-user', 'fake-project') return instance_mapping.InstanceMapping._create_in_db(ctxt, args) class InstanceMappingTestCase(test.NoDBTestCase): def setUp(self): super(InstanceMappingTestCase, self).setUp() self.useFixture(fixtures.Database(database='api')) self.context = context.RequestContext('fake-user', 'fake-project') self.mapping_obj = instance_mapping.InstanceMapping() def test_get_by_instance_uuid(self): mapping = create_mapping() db_mapping = self.mapping_obj._get_by_instance_uuid_from_db( self.context, mapping['instance_uuid']) for key in self.mapping_obj.fields.keys(): self.assertEqual(db_mapping[key], mapping[key]) def test_get_by_instance_uuid_not_found(self): self.assertRaises(exception.InstanceMappingNotFound, self.mapping_obj._get_by_instance_uuid_from_db, self.context, uuidutils.generate_uuid()) def test_save_in_db(self): mapping = create_mapping() self.mapping_obj._save_in_db(self.context, mapping['instance_uuid'], {'cell_id': 42}) db_mapping = self.mapping_obj._get_by_instance_uuid_from_db( self.context, mapping['instance_uuid']) self.assertNotEqual(db_mapping['cell_id'], mapping['cell_id']) for key in [key for key in self.mapping_obj.fields.keys() if key not in ['cell_id', 'updated_at']]: self.assertEqual(db_mapping[key], mapping[key]) def test_destroy_in_db(self): mapping = create_mapping() self.mapping_obj._get_by_instance_uuid_from_db(self.context, mapping['instance_uuid']) self.mapping_obj._destroy_in_db(self.context, mapping['instance_uuid']) self.assertRaises(exception.InstanceMappingNotFound, self.mapping_obj._get_by_instance_uuid_from_db, self.context, mapping['instance_uuid']) def test_cell_id_nullable(self): # Just ensure this doesn't raise create_mapping(cell_id=None) class InstanceMappingListTestCase(test.NoDBTestCase): def setUp(self): 
super(InstanceMappingListTestCase, self).setUp() self.useFixture(fixtures.Database(database='api')) self.context = context.RequestContext('fake-user', 'fake-project') self.list_obj = instance_mapping.InstanceMappingList() def test_get_by_project_id_from_db(self): project_id = 'fake-project' mappings = {} mapping = create_mapping(project_id=project_id) mappings[mapping['instance_uuid']] = mapping mapping = create_mapping(project_id=project_id) mappings[mapping['instance_uuid']] = mapping db_mappings = self.list_obj._get_by_project_id_from_db( self.context, project_id) for db_mapping in db_mappings: mapping = mappings[db_mapping.instance_uuid] for key in instance_mapping.InstanceMapping.fields.keys(): self.assertEqual(db_mapping[key], mapping[key]) nova-13.1.4/nova/tests/functional/db/test_request_spec.py0000664000567000056710000000465313064447152024671 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import context from nova import exception from nova.objects import base as obj_base from nova.objects import request_spec from nova import test from nova.tests import fixtures from nova.tests.unit import fake_request_spec class RequestSpecTestCase(test.NoDBTestCase): def setUp(self): super(RequestSpecTestCase, self).setUp() self.useFixture(fixtures.Database(database='api')) self.context = context.RequestContext('fake-user', 'fake-project') self.spec_obj = request_spec.RequestSpec() self.instance_uuid = None def _create_spec(self): args = fake_request_spec.fake_db_spec() args.pop('id', None) self.instance_uuid = args['instance_uuid'] request_spec.RequestSpec._from_db_object(self.context, self.spec_obj, self.spec_obj._create_in_db(self.context, args)) return self.spec_obj def test_get_by_instance_uuid_not_found(self): self.assertRaises(exception.RequestSpecNotFound, self.spec_obj._get_by_instance_uuid_from_db, self.context, self.instance_uuid) def test_get_by_uuid(self): spec = self._create_spec() db_spec = self.spec_obj.get_by_instance_uuid(self.context, self.instance_uuid) self.assertTrue(obj_base.obj_equal_prims(spec, db_spec)) def test_save_in_db(self): spec = self._create_spec() old_az = spec.availability_zone spec.availability_zone = '%s-new' % old_az spec.save() db_spec = self.spec_obj.get_by_instance_uuid(self.context, spec.instance_uuid) self.assertTrue(obj_base.obj_equal_prims(spec, db_spec)) self.assertNotEqual(old_az, db_spec.availability_zone) def test_double_create(self): spec = self._create_spec() self.assertRaises(exception.ObjectActionError, spec.create) nova-13.1.4/nova/tests/functional/db/api/0000775000567000056710000000000013064447471021323 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/db/api/__init__.py0000664000567000056710000000000013064447140023413 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/db/api/test_migrations.py0000664000567000056710000002644313064447152025115 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except 
in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. There are "opportunistic" tests which allows testing against all 3 databases (sqlite in memory, mysql, pg) in a properly configured unit test environment. For the opportunistic testing you need to set up db's named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and u/p combo to run the tests. For postgres on Ubuntu this can be done with the following commands:: | sudo -u postgres psql | postgres=# create user openstack_citest with createdb login password | 'openstack_citest'; | postgres=# create database openstack_citest with owner openstack_citest; """ import logging import os from migrate.versioning import repository import mock from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations from oslo_db.sqlalchemy import utils as db_utils import sqlalchemy from sqlalchemy.engine import reflection from nova.db import migration from nova.db.sqlalchemy.api_migrations import migrate_repo from nova.db.sqlalchemy import api_models from nova.db.sqlalchemy import migration as sa_migration from nova import test class NovaAPIModelsSync(test_migrations.ModelsMigrationsSync): """Test that the models match the database after migrations are run.""" def db_sync(self, engine): with mock.patch.object(sa_migration, 'get_engine', return_value=engine): sa_migration.db_sync(database='api') @property def migrate_engine(self): return self.engine def get_engine(self): return self.migrate_engine def get_metadata(self): return api_models.API_BASE.metadata def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table': # migrate_version is a sqlalchemy-migrate control table and # isn't included in the model. 
if name == 'migrate_version': return False return True class TestNovaAPIMigrationsSQLite(NovaAPIModelsSync, test_base.DbTestCase, test.NoDBTestCase): pass class TestNovaAPIMigrationsMySQL(NovaAPIModelsSync, test_base.MySQLOpportunisticTestCase, test.NoDBTestCase): pass class TestNovaAPIMigrationsPostgreSQL(NovaAPIModelsSync, test_base.PostgreSQLOpportunisticTestCase, test.NoDBTestCase): pass class NovaAPIMigrationsWalk(test_migrations.WalkVersionsMixin): def setUp(self): super(NovaAPIMigrationsWalk, self).setUp() # NOTE(viktors): We should reduce log output because it causes issues, # when we run tests with testr migrate_log = logging.getLogger('migrate') old_level = migrate_log.level migrate_log.setLevel(logging.WARN) self.addCleanup(migrate_log.setLevel, old_level) @property def INIT_VERSION(self): return migration.db_initial_version('api') @property def REPOSITORY(self): return repository.Repository( os.path.abspath(os.path.dirname(migrate_repo.__file__))) @property def migration_api(self): return sa_migration.versioning_api @property def migrate_engine(self): return self.engine def test_walk_versions(self): self.walk_versions(snake_walk=False, downgrade=False) def assertColumnExists(self, engine, table_name, column): self.assertTrue(db_utils.column_exists(engine, table_name, column), 'Column %s.%s does not exist' % (table_name, column)) def assertIndexExists(self, engine, table_name, index): self.assertTrue(db_utils.index_exists(engine, table_name, index), 'Index %s on table %s does not exist' % (index, table_name)) def assertUniqueConstraintExists(self, engine, table_name, columns): inspector = reflection.Inspector.from_engine(engine) constrs = inspector.get_unique_constraints(table_name) constr_columns = [constr['column_names'] for constr in constrs] self.assertIn(columns, constr_columns) def assertTableNotExists(self, engine, table_name): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, table_name) def _check_001(self, engine, data): for column in ['created_at', 'updated_at', 'id', 'uuid', 'name', 'transport_url', 'database_connection']: self.assertColumnExists(engine, 'cell_mappings', column) self.assertIndexExists(engine, 'cell_mappings', 'uuid_idx') self.assertUniqueConstraintExists(engine, 'cell_mappings', ['uuid']) def _check_002(self, engine, data): for column in ['created_at', 'updated_at', 'id', 'instance_uuid', 'cell_id', 'project_id']: self.assertColumnExists(engine, 'instance_mappings', column) for index in ['instance_uuid_idx', 'project_id_idx']: self.assertIndexExists(engine, 'instance_mappings', index) self.assertUniqueConstraintExists(engine, 'instance_mappings', ['instance_uuid']) inspector = reflection.Inspector.from_engine(engine) # There should only be one foreign key here fk = inspector.get_foreign_keys('instance_mappings')[0] self.assertEqual('cell_mappings', fk['referred_table']) self.assertEqual(['id'], fk['referred_columns']) self.assertEqual(['cell_id'], fk['constrained_columns']) def _check_003(self, engine, data): for column in ['created_at', 'updated_at', 'id', 'cell_id', 'host']: self.assertColumnExists(engine, 'host_mappings', column) self.assertIndexExists(engine, 'host_mappings', 'host_idx') self.assertUniqueConstraintExists(engine, 'host_mappings', ['host']) inspector = reflection.Inspector.from_engine(engine) # There should only be one foreign key here fk = inspector.get_foreign_keys('host_mappings')[0] self.assertEqual('cell_mappings', fk['referred_table']) self.assertEqual(['id'], fk['referred_columns']) 
self.assertEqual(['cell_id'], fk['constrained_columns']) def _check_004(self, engine, data): columns = ['created_at', 'updated_at', 'id', 'instance_uuid', 'spec'] for column in columns: self.assertColumnExists(engine, 'request_specs', column) self.assertUniqueConstraintExists(engine, 'request_specs', ['instance_uuid']) if engine.name != 'ibm_db_sa': self.assertIndexExists(engine, 'request_specs', 'request_spec_instance_uuid_idx') def _check_005(self, engine, data): # flavors for column in ['created_at', 'updated_at', 'name', 'id', 'memory_mb', 'vcpus', 'swap', 'vcpu_weight', 'flavorid', 'rxtx_factor', 'root_gb', 'ephemeral_gb', 'disabled', 'is_public']: self.assertColumnExists(engine, 'flavors', column) self.assertUniqueConstraintExists(engine, 'flavors', ['flavorid']) self.assertUniqueConstraintExists(engine, 'flavors', ['name']) # flavor_extra_specs for column in ['created_at', 'updated_at', 'id', 'flavor_id', 'key', 'value']: self.assertColumnExists(engine, 'flavor_extra_specs', column) if engine.name != 'ibm_db_sa': self.assertIndexExists(engine, 'flavor_extra_specs', 'flavor_extra_specs_flavor_id_key_idx') self.assertUniqueConstraintExists(engine, 'flavor_extra_specs', ['flavor_id', 'key']) inspector = reflection.Inspector.from_engine(engine) # There should only be one foreign key here fk = inspector.get_foreign_keys('flavor_extra_specs')[0] self.assertEqual('flavors', fk['referred_table']) self.assertEqual(['id'], fk['referred_columns']) self.assertEqual(['flavor_id'], fk['constrained_columns']) # flavor_projects for column in ['created_at', 'updated_at', 'id', 'flavor_id', 'project_id']: self.assertColumnExists(engine, 'flavor_projects', column) self.assertUniqueConstraintExists(engine, 'flavor_projects', ['flavor_id', 'project_id']) inspector = reflection.Inspector.from_engine(engine) # There should only be one foreign key here fk = inspector.get_foreign_keys('flavor_projects')[0] self.assertEqual('flavors', fk['referred_table']) self.assertEqual(['id'], fk['referred_columns']) self.assertEqual(['flavor_id'], fk['constrained_columns']) def _check_006(self, engine, data): for column in ['id', 'request_spec_id', 'project_id', 'user_id', 'display_name', 'instance_metadata', 'progress', 'vm_state', 'image_ref', 'access_ip_v4', 'access_ip_v6', 'info_cache', 'security_groups', 'config_drive', 'key_name', 'locked_by']: self.assertColumnExists(engine, 'build_requests', column) self.assertIndexExists(engine, 'build_requests', 'build_requests_project_id_idx') self.assertUniqueConstraintExists(engine, 'build_requests', ['request_spec_id']) inspector = reflection.Inspector.from_engine(engine) # There should only be one foreign key here fk = inspector.get_foreign_keys('build_requests')[0] self.assertEqual('request_specs', fk['referred_table']) self.assertEqual(['id'], fk['referred_columns']) self.assertEqual(['request_spec_id'], fk['constrained_columns']) def _check_007(self, engine, data): map_table = db_utils.get_table(engine, 'instance_mappings') self.assertTrue(map_table.columns['cell_id'].nullable) # Ensure the foreign key still exists inspector = reflection.Inspector.from_engine(engine) # There should only be one foreign key here fk = inspector.get_foreign_keys('instance_mappings')[0] self.assertEqual('cell_mappings', fk['referred_table']) self.assertEqual(['id'], fk['referred_columns']) self.assertEqual(['cell_id'], fk['constrained_columns']) class TestNovaAPIMigrationsWalkSQLite(NovaAPIMigrationsWalk, test_base.DbTestCase, test.NoDBTestCase): pass class 
TestNovaAPIMigrationsWalkMySQL(NovaAPIMigrationsWalk, test_base.MySQLOpportunisticTestCase, test.NoDBTestCase): pass class TestNovaAPIMigrationsWalkPostgreSQL(NovaAPIMigrationsWalk, test_base.PostgreSQLOpportunisticTestCase, test.NoDBTestCase): pass nova-13.1.4/nova/tests/functional/db/test_cell_mapping.py0000664000567000056710000000602313064447152024612 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from nova import context from nova import exception from nova.objects import cell_mapping from nova import test from nova.tests import fixtures class CellMappingTestCase(test.NoDBTestCase): def setUp(self): super(CellMappingTestCase, self).setUp() self.useFixture(fixtures.Database(database='api')) self.context = context.RequestContext('fake-user', 'fake-project') self.mapping_obj = cell_mapping.CellMapping() self.uuid = uuidutils.generate_uuid() sample_mapping = {'uuid': '', 'name': 'fake-cell', 'transport_url': 'rabbit:///', 'database_connection': 'mysql+pymysql:///'} def _create_mapping(self, **kwargs): args = self.sample_mapping.copy() if 'uuid' not in kwargs: args['uuid'] = self.uuid args.update(kwargs) return self.mapping_obj._create_in_db(self.context, args) def test_get_by_uuid(self): mapping = self._create_mapping() db_mapping = self.mapping_obj._get_by_uuid_from_db(self.context, mapping['uuid']) for key in self.mapping_obj.fields.keys(): self.assertEqual(db_mapping[key], mapping[key]) def test_get_by_uuid_not_found(self): self.assertRaises(exception.CellMappingNotFound, self.mapping_obj._get_by_uuid_from_db, self.context, self.uuid) def test_save_in_db(self): mapping = self._create_mapping() self.mapping_obj._save_in_db(self.context, mapping['uuid'], {'name': 'meow'}) db_mapping = self.mapping_obj._get_by_uuid_from_db(self.context, mapping['uuid']) self.assertNotEqual(db_mapping['name'], mapping['name']) for key in [key for key in self.mapping_obj.fields.keys() if key not in ['name', 'updated_at']]: self.assertEqual(db_mapping[key], mapping[key]) def test_destroy_in_db(self): mapping = self._create_mapping() self.mapping_obj._get_by_uuid_from_db(self.context, mapping['uuid']) self.mapping_obj._destroy_in_db(self.context, mapping['uuid']) self.assertRaises(exception.CellMappingNotFound, self.mapping_obj._get_by_uuid_from_db, self.context, mapping['uuid']) def test_destroy_in_db_not_found(self): self.assertRaises(exception.CellMappingNotFound, self.mapping_obj._destroy_in_db, self.context, self.uuid) nova-13.1.4/nova/tests/functional/libvirt/0000775000567000056710000000000013064447471021640 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/libvirt/test_numa_servers.py0000664000567000056710000001507213064447152025763 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import fixtures from oslo_config import cfg from oslo_log import log as logging from nova import test from nova.tests.functional.test_servers import ServersTestBase from nova.tests.unit import fake_network from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt CONF = cfg.CONF LOG = logging.getLogger(__name__) class NumaHostInfo(fakelibvirt.HostInfo): def __init__(self, **kwargs): super(NumaHostInfo, self).__init__(**kwargs) self.numa_mempages_list = [] def get_numa_topology(self): if self.numa_topology: return self.numa_topology topology = self._gen_numa_topology(self.cpu_nodes, self.cpu_sockets, self.cpu_cores, self.cpu_threads, self.kB_mem) self.numa_topology = topology # update number of active cpus cpu_count = len(topology.cells) * len(topology.cells[0].cpus) self.cpus = cpu_count - len(self.disabled_cpus_list) return topology def set_custom_numa_toplogy(self, topology): self.numa_topology = topology class NUMAServersTest(ServersTestBase): def setUp(self): super(NUMAServersTest, self).setUp() # Replace libvirt with fakelibvirt self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.guest.libvirt', fakelibvirt)) self.useFixture(fakelibvirt.FakeLibvirtFixture()) def _setup_compute_service(self): pass def _setup_scheduler_service(self): self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver') self.flags(scheduler_driver='filter_scheduler') self.flags(scheduler_default_filters=CONF.scheduler_default_filters + ['NUMATopologyFilter']) return self.start_service('scheduler') def _run_build_test(self, flavor_id, filter_mock, end_status='ACTIVE'): self.compute = self.start_service('compute', host='test_compute0') fake_network.set_stub_network_methods(self) # Create server good_server = self._build_server(flavor_id) post = {'server': good_server} created_server = self.api.post_server(post) LOG.debug("created_server: %s" % created_server) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Validate that the server has been created found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) # It should also be in the all-servers list servers = self.api.get_servers() server_ids = [s['id'] for s in servers] self.assertIn(created_server_id, server_ids) # Validate that NUMATopologyFilter has been called self.assertTrue(filter_mock.called) found_server = self._wait_for_state_change(found_server, 'BUILD') self.assertEqual(end_status, found_server['status']) self._delete_server(created_server_id) def _get_topology_filter_spy(self): host_manager = self.scheduler.manager.driver.host_manager numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter'] host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes) return host_pass_mock 
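# NOTE: standalone sketch (an annotation, not part of NUMAServersTest) of the
# "spy" technique used by _get_topology_filter_spy above: mock.Mock(wraps=...)
# forwards every call to the real callable while still recording it, so a test
# can assert that NUMATopologyFilter really ran without changing its result.
# The stdlib ``unittest.mock`` import is an assumption; the suite itself uses
# the external ``mock`` package.
from unittest import mock

def host_passes(host_state, spec_obj):
    # stand-in for NUMATopologyFilter.host_passes
    return True

spy = mock.Mock(wraps=host_passes)
assert spy('fake-host', 'fake-spec') is True      # real return value preserved
spy.assert_called_once_with('fake-host', 'fake-spec')  # ...and the call recorded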
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image') def test_create_server_with_numa_topology(self, img_mock): host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2, kB_mem=15740000) fake_connection = fakelibvirt.Connection('qemu:///system', version=1002007, hv_version=2001000, host_info=host_info) # Create a flavor extra_spec = {'hw:numa_nodes': '2'} flavor_id = self._create_flavor(extra_spec=extra_spec) host_pass_mock = self._get_topology_filter_spy() with test.nested( mock.patch('nova.virt.libvirt.host.Host.get_connection', return_value=fake_connection), mock.patch('nova.scheduler.filters' '.numa_topology_filter.NUMATopologyFilter.host_passes', side_effect=host_pass_mock)) as (conn_mock, filter_mock): self._run_build_test(flavor_id, filter_mock) @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image') def test_create_server_with_numa_fails(self, img_mock): host_info = NumaHostInfo(cpu_nodes=1, cpu_sockets=1, cpu_cores=2, kB_mem=15740000) fake_connection = fakelibvirt.Connection('qemu:///system', version=1002007, host_info=host_info) # Create a flavor extra_spec = {'hw:numa_nodes': '2'} flavor_id = self._create_flavor(extra_spec=extra_spec) host_pass_mock = self._get_topology_filter_spy() with test.nested( mock.patch('nova.virt.libvirt.host.Host.get_connection', return_value=fake_connection), mock.patch('nova.scheduler.filters' '.numa_topology_filter.NUMATopologyFilter.host_passes', side_effect=host_pass_mock)) as (conn_mock, filter_mock): self._run_build_test(flavor_id, filter_mock, end_status='ERROR') nova-13.1.4/nova/tests/functional/libvirt/__init__.py0000664000567000056710000000000013064447140023730 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/libvirt/test_rt_servers.py0000664000567000056710000001362413064447152025451 0ustar jenkinsjenkins00000000000000# Copyright (C) 2015 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
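# NOTE: hedged sketch of the REST calls behind self._create_flavor(extra_spec=...)
# in the NUMA tests above: a flavor is created and its extra specs set so that
# scheduler filters such as NUMATopologyFilter can act on them.  ``api`` is
# assumed to be a TestOpenStackClient obtained from an OSAPIFixture, as in the
# other functional tests in this tree; the flavor values are illustrative only.
flavor = {'flavor': {'name': 'numa-flavor', 'id': 'numa-flavor',
                     'ram': 2048, 'vcpus': 4, 'disk': 10}}
api.post_flavor(flavor)                                   # POST /flavors
api.post_extra_spec('numa-flavor',                        # POST /flavors/{id}/os-extra_specs
                    {'extra_specs': {'hw:numa_nodes': '2'}})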
import mock import fixtures from oslo_log import log as logging from nova.tests.functional.api import client from nova.tests.functional.test_servers import ServersTestBase from nova.tests.unit import fake_network from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt LOG = logging.getLogger(__name__) class NumaHostInfo(fakelibvirt.HostInfo): def __init__(self, **kwargs): super(NumaHostInfo, self).__init__(**kwargs) self.numa_mempages_list = [] def get_numa_topology(self): if self.numa_topology: return self.numa_topology topology = self._gen_numa_topology(self.cpu_nodes, self.cpu_sockets, self.cpu_cores, self.cpu_threads, self.kB_mem) self.numa_topology = topology # update number of active cpus cpu_count = len(topology.cells) * len(topology.cells[0].cpus) self.cpus = cpu_count - len(self.disabled_cpus_list) return topology def set_custom_numa_toplogy(self, topology): self.numa_topology = topology class RealTimeServersTest(ServersTestBase): def setUp(self): super(RealTimeServersTest, self).setUp() # Replace libvirt with fakelibvirt self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.guest.libvirt', fakelibvirt)) self.useFixture(fakelibvirt.FakeLibvirtFixture()) def _setup_compute_service(self): self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver') def test_no_dedicated_cpu(self): flavor = self._create_flavor(extra_spec={'hw:cpu_realtime': 'yes'}) server = self._build_server(flavor) # Cannot set realtime policy in a non dedicated cpu pinning policy self.assertRaises( client.OpenStackApiException, self.api.post_server, {'server': server}) def test_no_realtime_mask(self): flavor = self._create_flavor(extra_spec={ 'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated'}) server = self._build_server(flavor) # Cannot set realtime policy if not vcpus mask defined self.assertRaises( client.OpenStackApiException, self.api.post_server, {'server': server}) @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image') def test_invalid_libvirt_version(self, img_mock): host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2, kB_mem=15740000) fake_connection = fakelibvirt.Connection('qemu:///system', version=1002007, hv_version=2001000, host_info=host_info) with mock.patch('nova.virt.libvirt.host.Host.get_connection', return_value=fake_connection): self.compute = self.start_service('compute', host='test_compute0') fake_network.set_stub_network_methods(self) flavor = self._create_flavor(extra_spec={ 'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated', 'hw:cpu_realtime_mask': '^1'}) server = self._build_server(flavor) created = self.api.post_server({'server': server}) instance = self.api.get_server(created['id']) instance = self._wait_for_state_change(instance, 'BUILD') # Realtime policy not supported by hypervisor self.assertEqual('ERROR', instance['status']) self._delete_server(instance['id']) @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image') def test_success(self, img_mock): host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2, kB_mem=15740000) fake_connection = fakelibvirt.Connection('qemu:///system', version=1002013, hv_version=2001000, host_info=host_info) with 
mock.patch('nova.virt.libvirt.host.Host.get_connection', return_value=fake_connection): self.compute = self.start_service('compute', host='test_compute0') fake_network.set_stub_network_methods(self) flavor = self._create_flavor(extra_spec={ 'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated', 'hw:cpu_realtime_mask': '^1'}) server = self._build_server(flavor) created = self.api.post_server({'server': server}) instance = self.api.get_server(created['id']) instance = self._wait_for_state_change(instance, 'BUILD') self.assertEqual('ACTIVE', instance['status']) self._delete_server(instance['id']) nova-13.1.4/nova/tests/functional/test_middleware.py0000664000567000056710000001024713064447152023713 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests to assert that various incorporated middleware works as expected. """ from oslo_config import cfg from nova.tests.functional.api_sample_tests import api_sample_base class TestCORSMiddleware(api_sample_base.ApiSampleTestBaseV21): '''Provide a basic smoke test to ensure CORS middleware is active. The tests below provide minimal confirmation that the CORS middleware is active, and may be configured. For comprehensive tests, please consult the test suite in oslo_middleware. ''' def setUp(self): # Here we monkeypatch GroupAttr.__getattr__, necessary because the # paste.ini method of initializing this middleware creates its own # ConfigOpts instance, bypassing the regular config fixture. # Mocking also does not work, as accessing an attribute on a mock # object will return a MagicMock instance, which will fail # configuration type checks. def _mock_getattr(instance, key): if key != 'allowed_origin': return self._original_call_method(instance, key) return "http://valid.example.com" self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__ cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr # Initialize the application after all the config overrides are in # place. super(TestCORSMiddleware, self).setUp() def tearDown(self): super(TestCORSMiddleware, self).tearDown() # Reset the configuration overrides. 
cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method def test_valid_cors_options_request(self): response = self._do_options('servers', headers={ 'Origin': 'http://valid.example.com', 'Access-Control-Request-Method': 'GET' }) self.assertEqual(response.status_code, 200) self.assertIn('Access-Control-Allow-Origin', response.headers) self.assertEqual('http://valid.example.com', response.headers['Access-Control-Allow-Origin']) def test_invalid_cors_options_request(self): response = self._do_options('servers', headers={ 'Origin': 'http://invalid.example.com', 'Access-Control-Request-Method': 'GET' }) self.assertEqual(response.status_code, 200) self.assertNotIn('Access-Control-Allow-Origin', response.headers) def test_valid_cors_get_request(self): response = self._do_get('servers', headers={ 'Origin': 'http://valid.example.com', 'Access-Control-Request-Method': 'GET' }) self.assertEqual(response.status_code, 200) self.assertIn('Access-Control-Allow-Origin', response.headers) self.assertEqual('http://valid.example.com', response.headers['Access-Control-Allow-Origin']) def test_invalid_cors_get_request(self): response = self._do_get('servers', headers={ 'Origin': 'http://invalid.example.com', 'Access-Control-Request-Method': 'GET' }) self.assertEqual(response.status_code, 200) self.assertNotIn('Access-Control-Allow-Origin', response.headers) nova-13.1.4/nova/tests/functional/wsgi/0000775000567000056710000000000013064447471021136 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/wsgi/test_flavor_manage.py0000664000567000056710000002155713064447152025356 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from nova import context from nova import db from nova import exception as ex from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional import integrated_helpers as helper from nova.tests.unit import policy_fixture def rand_flavor(**kwargs): flav = { 'name': 'name-%s' % helper.generate_random_alphanumeric(10), 'id': helper.generate_random_alphanumeric(10), 'ram': int(helper.generate_random_numeric(2)) + 1, 'disk': int(helper.generate_random_numeric(3)), 'vcpus': int(helper.generate_random_numeric(1)) + 1, } flav.update(kwargs) return flav class FlavorManageFullstack(test.TestCase): """Tests for flavors manage administrative command. Extension: os-flavors-manage os-flavors-manage adds a set of admin functions to the flavors resource for the creation and deletion of flavors. POST /v2/flavors: :: { 'name': NAME, # string, required unique 'id': ID, # string, required unique 'ram': RAM, # in MB, required 'vcpus': VCPUS, # int value, required 'disk': DISK, # in GB, required 'OS-FLV-EXT-DATA:ephemeral', # in GB, ephemeral disk size 'is_public': IS_PUBLIC, # boolean 'swap': SWAP, # in GB? 'rxtx_factor': RXTX, # ??? 
} Returns Flavor DELETE /v2/flavors/ID Functional Test Scope: This test starts the wsgi stack for the nova api services, uses an in memory database to ensure the path through the wsgi layer to the database. """ def setUp(self): super(FlavorManageFullstack, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture()) # NOTE(sdague): because this test is primarily an admin API # test default self.api to the admin api. self.api = api_fixture.admin_api self.user_api = api_fixture.api def assertFlavorDbEqual(self, flav, flavdb): # a mapping of the REST params to the db fields mapping = { 'name': 'name', 'disk': 'root_gb', 'ram': 'memory_mb', 'vcpus': 'vcpus', 'id': 'flavorid', 'swap': 'swap' } for k, v in six.iteritems(mapping): if k in flav: self.assertEqual(flav[k], flavdb[v], "%s != %s" % (flav, flavdb)) def assertFlavorAPIEqual(self, flav, flavapi): # for all keys in the flavor, ensure they are correctly set in # flavapi response. for k, v in six.iteritems(flav): if k in flavapi: self.assertEqual(flav[k], flavapi[k], "%s != %s" % (flav, flavapi)) else: self.fail("Missing key: %s in flavor: %s" % (k, flavapi)) def assertFlavorInList(self, flav, flavlist): for item in flavlist['flavors']: if flav['id'] == item['id']: self.assertEqual(flav['name'], item['name']) return self.fail("%s not found in %s" % (flav, flavlist)) def assertFlavorNotInList(self, flav, flavlist): for item in flavlist['flavors']: if flav['id'] == item['id']: self.fail("%s found in %s" % (flav, flavlist)) def test_flavor_manage_func_negative(self): """Test flavor manage edge conditions. - Bogus body is a 400 - Unknown flavor is a 404 - Deleting unknown flavor is a 404 """ # Test for various API failure conditions # bad body is 400 resp = self.api.api_post('flavors', '', check_response_status=False) self.assertEqual(400, resp.status) # get unknown flavor is 404 resp = self.api.api_delete('flavors/foo', check_response_status=False) self.assertEqual(404, resp.status) # delete unknown flavor is 404 resp = self.api.api_delete('flavors/foo', check_response_status=False) self.assertEqual(404, resp.status) ctx = context.get_admin_context() # bounds conditions - invalid vcpus flav = {'flavor': rand_flavor(vcpus=0)} resp = self.api.api_post('flavors', flav, check_response_status=False) self.assertEqual(400, resp.status, resp) # ... and ensure that we didn't leak it into the db self.assertRaises(ex.FlavorNotFound, db.flavor_get_by_flavor_id, ctx, flav['flavor']['id']) # bounds conditions - invalid ram flav = {'flavor': rand_flavor(ram=0)} resp = self.api.api_post('flavors', flav, check_response_status=False) self.assertEqual(400, resp.status) # ... and ensure that we didn't leak it into the db self.assertRaises(ex.FlavorNotFound, db.flavor_get_by_flavor_id, ctx, flav['flavor']['id']) # NOTE(sdague): if there are other bounds conditions that # should be checked, stack them up here. def test_flavor_manage_deleted(self): """Ensure the behavior around a deleted flavor is stable. - Fetching a deleted flavor works, and returns the flavor info. 
- Listings should not contain deleted flavors """ # create a deleted flavor new_flav = {'flavor': rand_flavor()} self.api.api_post('flavors', new_flav) self.api.api_delete('flavors/%s' % new_flav['flavor']['id']) # It is valid to directly fetch details of a deleted flavor resp = self.api.api_get('flavors/%s' % new_flav['flavor']['id']) self.assertEqual(200, resp.status) self.assertFlavorAPIEqual(new_flav['flavor'], resp.body['flavor']) # deleted flavor should not show up in a list resp = self.api.api_get('flavors') self.assertFlavorNotInList(new_flav['flavor'], resp.body) def test_flavor_manage_func(self): """Basic flavor creation lifecycle testing. - Creating a flavor - Ensure it's in the database - Ensure it's in the listing - Delete it - Ensure it's hidden in the database """ ctx = context.get_admin_context() flav1 = { 'flavor': rand_flavor(), } # Create flavor and ensure it made it to the database self.api.api_post('flavors', flav1) flav1db = db.flavor_get_by_flavor_id(ctx, flav1['flavor']['id']) self.assertFlavorDbEqual(flav1['flavor'], flav1db) # Ensure new flavor is seen in the listing resp = self.api.api_get('flavors') self.assertFlavorInList(flav1['flavor'], resp.body) # Delete flavor and ensure it was removed from the database self.api.api_delete('flavors/%s' % flav1['flavor']['id']) self.assertRaises(ex.FlavorNotFound, db.flavor_get_by_flavor_id, ctx, flav1['flavor']['id']) resp = self.api.api_delete('flavors/%s' % flav1['flavor']['id'], check_response_status=False) self.assertEqual(404, resp.status) def test_flavor_manage_permissions(self): """Ensure that regular users can't create or delete flavors. """ ctx = context.get_admin_context() flav1 = {'flavor': rand_flavor()} # Ensure user can't create flavor resp = self.user_api.api_post('flavors', flav1, check_response_status=False) self.assertEqual(403, resp.status) # ... and that it didn't leak through self.assertRaises(ex.FlavorNotFound, db.flavor_get_by_flavor_id, ctx, flav1['flavor']['id']) # Create the flavor as the admin user self.api.api_post('flavors', flav1) # Ensure user can't delete flavors from our cloud resp = self.user_api.api_delete('flavors/%s' % flav1['flavor']['id'], check_response_status=False) self.assertEqual(403, resp.status) # ... and ensure that we didn't actually delete the flavor, # this will throw an exception if we did. db.flavor_get_by_flavor_id(ctx, flav1['flavor']['id']) nova-13.1.4/nova/tests/functional/wsgi/__init__.py0000664000567000056710000000000013064447140023226 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/wsgi/test_secgroup.py0000664000567000056710000000646313064447152024403 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
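# NOTE: hedged, concrete version of the request body sketched in the
# FlavorManageFullstack docstring above, using the same keys it lists.  The
# values are illustrative; to the best of my understanding swap is expressed
# in MB (not GB) and rxtx_factor is a float network rx/tx multiplier, but
# treat both as assumptions rather than authoritative API documentation.
example_flavor_create_body = {
    'flavor': {
        'name': 'example.small',          # required, unique
        'id': 'example-small',            # required, unique flavorid
        'ram': 2048,                      # MB
        'vcpus': 2,
        'disk': 20,                       # root disk, GB
        'OS-FLV-EXT-DATA:ephemeral': 0,   # ephemeral disk, GB
        'swap': 512,                      # MB (assumption, see note above)
        'rxtx_factor': 1.0,               # rx/tx multiplier (assumption)
        'is_public': True,
    }
}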
from oslo_log import log as logging import testscenarios from nova import test from nova.tests import fixtures as nova_fixtures import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture LOG = logging.getLogger(__name__) class SecgroupsFullstack(testscenarios.WithScenarios, test.TestCase): """Tests for security groups TODO: describe security group API TODO: define scope """ REQUIRES_LOCKING = True _image_ref_parameter = 'imageRef' _flavor_ref_parameter = 'flavorRef' # This test uses ``testscenarios`` which matrix multiplies the # test across the scenarios listed below setting the attributres # in the dictionary on ``self`` for each scenario. scenarios = [ ('v2', { 'api_major_version': 'v2'}), # test v2.1 base microversion ('v2_1', { 'api_major_version': 'v2.1'}), ] def setUp(self): super(SecgroupsFullstack, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture()) self.api = api_fixture.api # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) # TODO(sdague): refactor this method into the API client, we're # going to use it a lot def _build_minimal_create_server_request(self, name): server = {} image = self.api.get_images()[0] LOG.info("Image: %s" % image) if self._image_ref_parameter in image: image_href = image[self._image_ref_parameter] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server[self._image_ref_parameter] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[1] server[self._flavor_ref_parameter] = ('http://fake.server/%s' % flavor['id']) server['name'] = name return server def test_security_group_fuzz(self): """Test security group doesn't explode with a 500 on bad input. Originally reported with bug https://bugs.launchpad.net/nova/+bug/1239723 """ server = self._build_minimal_create_server_request("sg-fuzz") # security groups must be passed as a list, this is an invalid # format. The jsonschema in v2.1 caught it automatically, but # in v2 we used to throw a 500. server['security_groups'] = {"name": "sec"} resp = self.api.api_post('/servers', {'server': server}, check_response_status=False) self.assertEqual(400, resp.status) nova-13.1.4/nova/tests/functional/test_instance_actions.py0000664000567000056710000000565313064447140025124 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.tests.functional.api import client from nova.tests.functional import test_servers from nova.tests.unit import fake_network class InstanceActionsTestV2(test_servers.ServersTestBase): """Tests Instance Actions API""" def _create_server(self): """Creates a minimal test server via the compute API Ensures the server is created and can be retrieved from the compute API and waits for it to be ACTIVE. 
:returns: created server (dict) """ # TODO(mriedem): We should pull this up into the parent class so we # don't have so much copy/paste in these functional tests. fake_network.set_stub_network_methods(self) # Create a server server = self._build_minimal_create_server_request() created_server = self.api.post_server({'server': server}) self.assertTrue(created_server['id']) created_server_id = created_server['id'] # Check it's there found_server = self.api.get_server(created_server_id) self.assertEqual(created_server_id, found_server['id']) found_server = self._wait_for_state_change(found_server, 'BUILD') # It should be available... self.assertEqual('ACTIVE', found_server['status']) return found_server def test_get_instance_actions(self): server = self._create_server() actions = self.api.get_instance_actions(server['id']) self.assertEqual('create', actions[0]['action']) def test_get_instance_actions_deleted(self): server = self._create_server() self._delete_server(server['id']) self.assertRaises(client.OpenStackApiNotFoundException, self.api.get_instance_actions, server['id']) class InstanceActionsTestV21(InstanceActionsTestV2): api_major_version = 'v2.1' class InstanceActionsTestV221(InstanceActionsTestV21): microversion = '2.21' def setUp(self): super(InstanceActionsTestV221, self).setUp() self.api.microversion = self.microversion def test_get_instance_actions_deleted(self): server = self._create_server() self._delete_server(server['id']) actions = self.api.get_instance_actions(server['id']) self.assertEqual('delete', actions[0]['action']) self.assertEqual('create', actions[1]['action']) nova-13.1.4/nova/tests/functional/api/0000775000567000056710000000000013064447471020736 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/functional/api/__init__.py0000664000567000056710000000133513064447140023042 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`api` -- OpenStack API client, for testing rather than production ================================= """ nova-13.1.4/nova/tests/functional/api/client.py0000664000567000056710000003226213064447152022567 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
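# NOTE: hedged illustration of the microversion mechanics exercised by
# InstanceActionsTestV221 above and implemented by the client defined below:
# when ``microversion`` is set, every request carries the
# X-OpenStack-Nova-API-Version header, which at 2.21 allows instance actions
# to be listed even for a deleted server.  ``api`` and ``server_id`` are
# placeholders, assumed to come from an OSAPIFixture-backed test.
api.microversion = '2.21'
actions = api.get_instance_actions(server_id)   # header sent automatically
assert actions[0]['action'] == 'delete'         # matches the v2.21 test above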
import urllib from oslo_log import log as logging from oslo_serialization import jsonutils import requests import six LOG = logging.getLogger(__name__) class APIResponse(object): """Decoded API Response This provides a decoded version of the Requests response which include a json decoded body, far more convenient for testing that returned structures are correct, or using parts of returned structures in tests. This class is a simple wrapper around dictionaries for API responses in tests. It includes extra attributes so that they can be inspected in addition to the attributes. All json responses from Nova APIs are dictionary compatible, or blank, so other possible base classes are not needed. """ status = 200 """The HTTP status code as an int""" content = "" """The Raw HTTP response body as a string""" body = {} """The decoded json body as a dictionary""" headers = {} """Response headers as a dictionary""" def __init__(self, response): """Construct an API response from a Requests response :param response: a ``requests`` library response """ super(APIResponse, self).__init__() self.status = response.status_code self.content = response.content if self.content: self.body = jsonutils.loads(self.content) self.headers = response.headers def __str__(self): # because __str__ falls back to __repr__ we can still use repr # on self but add in the other attributes. return "" % (self.body, self.status) class OpenStackApiException(Exception): def __init__(self, message=None, response=None): self.response = response if not message: message = 'Unspecified error' if response: _status = response.status_code _body = response.content message = ('%(message)s\nStatus Code: %(_status)s\n' 'Body: %(_body)s' % {'message': message, '_status': _status, '_body': _body}) super(OpenStackApiException, self).__init__(message) class OpenStackApiAuthenticationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authentication error" super(OpenStackApiAuthenticationException, self).__init__(message, response) class OpenStackApiAuthorizationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authorization error" super(OpenStackApiAuthorizationException, self).__init__(message, response) class OpenStackApiNotFoundException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Item not found" super(OpenStackApiNotFoundException, self).__init__(message, response) class TestOpenStackClient(object): """Simple OpenStack API Client. 
This is a really basic OpenStack API client that is under our control, so we can make changes / insert hooks for testing """ def __init__(self, auth_user, auth_key, auth_uri, project_id=None): super(TestOpenStackClient, self).__init__() self.auth_result = None self.auth_user = auth_user self.auth_key = auth_key self.auth_uri = auth_uri if project_id is None: self.project_id = "6f70656e737461636b20342065766572" else: self.project_id = project_id self.microversion = None def request(self, url, method='GET', body=None, headers=None): _headers = {'Content-Type': 'application/json'} _headers.update(headers or {}) response = requests.request(method, url, data=body, headers=_headers) return response def _authenticate(self): if self.auth_result: return self.auth_result auth_uri = self.auth_uri headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key, 'X-Auth-Project-Id': self.project_id} response = self.request(auth_uri, headers=headers) http_status = response.status_code LOG.debug("%(auth_uri)s => code %(http_status)s", {'auth_uri': auth_uri, 'http_status': http_status}) if http_status == 401: raise OpenStackApiAuthenticationException(response=response) self.auth_result = response.headers return self.auth_result def api_request(self, relative_uri, check_response_status=None, strip_version=False, **kwargs): auth_result = self._authenticate() # NOTE(justinsb): httplib 'helpfully' converts headers to lower case base_uri = auth_result['x-server-management-url'] if strip_version: # NOTE(vish): cut out version number and tenant_id base_uri = '/'.join(base_uri.split('/', 3)[:-1]) full_uri = '%s/%s' % (base_uri, relative_uri) headers = kwargs.setdefault('headers', {}) headers['X-Auth-Token'] = auth_result['x-auth-token'] if self.microversion: headers['X-OpenStack-Nova-API-Version'] = self.microversion response = self.request(full_uri, **kwargs) http_status = response.status_code LOG.debug("%(relative_uri)s => code %(http_status)s", {'relative_uri': relative_uri, 'http_status': http_status}) if check_response_status: if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: raise OpenStackApiAuthorizationException(response=response) else: raise OpenStackApiException( message="Unexpected status code", response=response) return response def _decode_json(self, response): resp = APIResponse(status=response.status_code) if response.content: resp.body = jsonutils.loads(response.content) return resp def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) return APIResponse(self.api_request(relative_uri, **kwargs)) def api_post(self, relative_uri, body, **kwargs): kwargs['method'] = 'POST' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202]) return APIResponse(self.api_request(relative_uri, **kwargs)) def api_put(self, relative_uri, body, **kwargs): kwargs['method'] = 'PUT' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202, 204]) return APIResponse(self.api_request(relative_uri, **kwargs)) def api_delete(self, relative_uri, **kwargs): kwargs['method'] = 'DELETE' kwargs.setdefault('check_response_status', [200, 202, 204]) return APIResponse(self.api_request(relative_uri, **kwargs)) 
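# NOTE: hedged usage sketch for the verb helpers above; ``client`` is assumed
# to be a TestOpenStackClient pointed at a running test API (e.g. via
# nova.tests.fixtures.OSAPIFixture).  Unexpected status codes are translated
# by api_request() into the OpenStackApiException subclasses defined earlier,
# while check_response_status=False hands back the raw APIResponse untouched.
servers = client.api_get('/servers').body['servers']       # 200 expected by default
resp = client.api_delete('flavors/no-such-flavor',
                         check_response_status=False)       # no exception raised
assert resp.status == 404                                   # caller inspects the code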
##################################### # # Convenience methods # # The following are a set of convenience methods to get well known # resources, they can be helpful in setting up resources in # tests. All of these convenience methods throw exceptions if they # get a non 20x status code, so will appropriately abort tests if # they fail. # # They all return the most relevant part of their response body as # decoded data structure. # ##################################### def get_server(self, server_id): return self.api_get('/servers/%s' % server_id).body['server'] def get_servers(self, detail=True, search_opts=None): rel_url = '/servers/detail' if detail else '/servers' if search_opts is not None: qparams = {} for opt, val in six.iteritems(search_opts): qparams[opt] = val if qparams: query_string = "?%s" % urllib.urlencode(qparams) rel_url += query_string return self.api_get(rel_url).body['servers'] def post_server(self, server): response = self.api_post('/servers', server).body if 'reservation_id' in response: return response else: return response['server'] def put_server(self, server_id, server): return self.api_put('/servers/%s' % server_id, server).body def post_server_action(self, server_id, data): return self.api_post('/servers/%s/action' % server_id, data).body def delete_server(self, server_id): return self.api_delete('/servers/%s' % server_id) def get_image(self, image_id): return self.api_get('/images/%s' % image_id).body['image'] def get_images(self, detail=True): rel_url = '/images/detail' if detail else '/images' return self.api_get(rel_url).body['images'] def post_image(self, image): return self.api_post('/images', image).body['image'] def delete_image(self, image_id): return self.api_delete('/images/%s' % image_id) def get_flavor(self, flavor_id): return self.api_get('/flavors/%s' % flavor_id).body['flavor'] def get_flavors(self, detail=True): rel_url = '/flavors/detail' if detail else '/flavors' return self.api_get(rel_url).body['flavors'] def post_flavor(self, flavor): return self.api_post('/flavors', flavor).body['flavor'] def delete_flavor(self, flavor_id): return self.api_delete('/flavors/%s' % flavor_id) def post_extra_spec(self, flavor_id, spec): return self.api_post('/flavors/%s/os-extra_specs' % flavor_id, spec) def get_volume(self, volume_id): return self.api_get('/volumes/%s' % volume_id).body['volume'] def get_volumes(self, detail=True): rel_url = '/volumes/detail' if detail else '/volumes' return self.api_get(rel_url).body['volumes'] def post_volume(self, volume): return self.api_post('/volumes', volume).body['volume'] def delete_volume(self, volume_id): return self.api_delete('/volumes/%s' % volume_id) def get_server_volume(self, server_id, attachment_id): return self.api_get('/servers/%s/os-volume_attachments/%s' % (server_id, attachment_id) ).body['volumeAttachment'] def get_server_volumes(self, server_id): return self.api_get('/servers/%s/os-volume_attachments' % (server_id)).body['volumeAttachments'] def post_server_volume(self, server_id, volume_attachment): return self.api_post('/servers/%s/os-volume_attachments' % (server_id), volume_attachment ).body['volumeAttachment'] def delete_server_volume(self, server_id, attachment_id): return self.api_delete('/servers/%s/os-volume_attachments/%s' % (server_id, attachment_id)) def post_server_metadata(self, server_id, metadata): post_body = {'metadata': {}} post_body['metadata'].update(metadata) return self.api_post('/servers/%s/metadata' % server_id, post_body).body['metadata'] def get_server_groups(self, 
all_projects=None): if all_projects: return self.api_get( '/os-server-groups?all_projects').body['server_groups'] else: return self.api_get('/os-server-groups').body['server_groups'] def get_server_group(self, group_id): return self.api_get('/os-server-groups/%s' % group_id).body['server_group'] def post_server_groups(self, group): response = self.api_post('/os-server-groups', {"server_group": group}) return response.body['server_group'] def delete_server_group(self, group_id): self.api_delete('/os-server-groups/%s' % group_id) def get_instance_actions(self, server_id): return self.api_get('/servers/%s/os-instance-actions' % (server_id)).body['instanceActions'] nova-13.1.4/nova/tests/unit/0000775000567000056710000000000013064447471017002 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/test_crypto.py0000664000567000056710000004014513064447152021733 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Crypto module. """ import os import uuid from cryptography.hazmat import backends from cryptography.hazmat.primitives import serialization import mock from oslo_concurrency import processutils import paramiko import six from nova import crypto from nova import exception from nova import test from nova import utils class X509Test(test.NoDBTestCase): @mock.patch('nova.db.certificate_create') def test_can_generate_x509(self, mock_create): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) crypto.ensure_ca_filesystem() _key, cert_str = crypto.generate_x509_cert('fake', 'fake') project_cert = crypto.fetch_ca(project_id='fake') signed_cert_file = os.path.join(tmpdir, "signed") with open(signed_cert_file, 'w') as keyfile: keyfile.write(cert_str) project_cert_file = os.path.join(tmpdir, "project") with open(project_cert_file, 'w') as keyfile: keyfile.write(project_cert) enc, err = utils.execute('openssl', 'verify', '-CAfile', project_cert_file, '-verbose', signed_cert_file) self.assertFalse(err) def test_encrypt_decrypt_x509(self): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) project_id = "fake" crypto.ensure_ca_filesystem() cert = crypto.fetch_ca(project_id) public_key = os.path.join(tmpdir, "public.pem") with open(public_key, 'w') as keyfile: keyfile.write(cert) text = "some @#!%^* test text" process_input = text.encode("ascii") if six.PY3 else text enc, _err = utils.execute('openssl', 'rsautl', '-certin', '-encrypt', '-inkey', '%s' % public_key, process_input=process_input, binary=True) dec = crypto.decrypt_text(project_id, enc) self.assertIsInstance(dec, bytes) if six.PY3: dec = dec.decode('ascii') self.assertEqual(text, dec) @mock.patch.object(utils, 'execute', side_effect=processutils.ProcessExecutionError) def test_ensure_ca_filesystem_chdir(self, *args, **kargs): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) start = os.getcwd() self.assertRaises(processutils.ProcessExecutionError, crypto.ensure_ca_filesystem) self.assertEqual(start, os.getcwd()) class 
RevokeCertsTest(test.NoDBTestCase): @mock.patch('nova.crypto.revoke_cert') def test_revoke_certs_by_user_and_project(self, mock_revoke): user_id = 'test_user' project_id = 2 file_name = 'test_file' def mock_certificate_get_all_by_user_and_project(context, user_id, project_id): return [{"user_id": user_id, "project_id": project_id, "file_name": file_name}] self.stub_out('nova.db.certificate_get_all_by_user_and_project', mock_certificate_get_all_by_user_and_project) crypto.revoke_certs_by_user_and_project(user_id, project_id) mock_revoke.assert_called_once_with(project_id, file_name) @mock.patch('nova.crypto.revoke_cert') def test_revoke_certs_by_user(self, mock_revoke): user_id = 'test_user' project_id = 2 file_name = 'test_file' def mock_certificate_get_all_by_user(context, user_id): return [{"user_id": user_id, "project_id": project_id, "file_name": file_name}] self.stub_out('nova.db.certificate_get_all_by_user', mock_certificate_get_all_by_user) crypto.revoke_certs_by_user(user_id) mock_revoke.assert_called_once_with(project_id, mock.ANY) @mock.patch('nova.crypto.revoke_cert') def test_revoke_certs_by_project(self, mock_revoke): user_id = 'test_user' project_id = 2 file_name = 'test_file' def mock_certificate_get_all_by_project(context, project_id): return [{"user_id": user_id, "project_id": project_id, "file_name": file_name}] self.stub_out('nova.db.certificate_get_all_by_project', mock_certificate_get_all_by_project) crypto.revoke_certs_by_project(project_id) mock_revoke.assert_called_once_with(project_id, mock.ANY) @mock.patch.object(utils, 'execute', side_effect=processutils.ProcessExecutionError) @mock.patch.object(os, 'chdir', return_value=None) def test_revoke_cert_process_execution_error(self, *args, **kargs): self.assertRaises(exception.RevokeCertFailure, crypto.revoke_cert, 2, 'test_file') def test_revoke_cert_project_not_found_chdir_fails(self, *args, **kargs): self.flags(use_project_ca=True) self.assertRaises(exception.ProjectNotFound, crypto.revoke_cert, str(uuid.uuid4()), 'test_file') class CertExceptionTests(test.NoDBTestCase): def test_fetch_ca_file_not_found(self): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) self.flags(use_project_ca=True) self.assertRaises(exception.CryptoCAFileNotFound, crypto.fetch_ca, project_id='fake') def test_fetch_crl_file_not_found(self): with utils.tempdir() as tmpdir: self.flags(ca_path=tmpdir) self.flags(use_project_ca=True) self.assertRaises(exception.CryptoCRLFileNotFound, crypto.fetch_crl, project_id='fake') class EncryptionTests(test.NoDBTestCase): pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArtgrfBu/g2o28o+H2ng/crv" "zgES91i/NNPPFTOutXelrJ9QiPTPTm+B8yspLsXifmbsmXztNOlBQgQXs6usxb4" "fnJKNUZ84Vkp5esbqK/L7eyRqwPvqo7btKBMoAMVX/kUyojMpxb7Ssh6M6Y8cpi" "goi+MSDPD7+5yRJ9z4mH9h7MCY6Ejv8KTcNYmVHvRhsFUcVhWcIISlNWUGiG7rf" "oki060F5myQN3AXcL8gHG5/Qb1RVkQFUKZ5geQ39/wSyYA1Q65QTba/5G2QNbl2" "0eAIBTyKZhN6g88ak+yARa6BLLDkrlP7L4WctHQMLsuXHohQsUO9AcOlVMARgrg" "uF test@test") prikey = """-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAwK7YK3wbv4NqNvKPh9p4P3K784BEvdYvzTTzxUzrrV3payfU Ij0z05vgfMrKS7F4n5m7Jl87TTpQUIEF7OrrMW+H5ySjVGfOFZKeXrG6ivy+3ska sD76qO27SgTKADFV/5FMqIzKcW+0rIejOmPHKYoKIvjEgzw+/uckSfc+Jh/YezAm OhI7/Ck3DWJlR70YbBVHFYVnCCEpTVlBohu636JItOtBeZskDdwF3C/IBxuf0G9U VZEBVCmeYHkN/f8EsmANUOuUE22v+RtkDW5dtHgCAU8imYTeoPPGpPsgEWugSyw5 K5T+y+FnLR0DC7Llx6IULFDvQHDpVTAEYK4LhQIDAQABAoIBAF9ibrrgHnBpItx+ qVUMbriiGK8LUXxUmqdQTljeolDZi6KzPc2RVKWtpazBSvG7skX3+XCediHd+0JP DNri1HlNiA6B0aUIGjoNsf6YpwsE4YwyK9cR5k5YGX4j7se3pKX2jOdngxQyw1Mh 
dkmCeWZz4l67nbSFz32qeQlwrsB56THJjgHB7elDoGCXTX/9VJyjFlCbfxVCsIng inrNgT0uMSYMNpAjTNOjguJt/DtXpwzei5eVpsERe0TRRVH23ycS0fuq/ancYwI/ MDr9KSB8r+OVGeVGj3popCxECxYLBxhqS1dAQyJjhQXKwajJdHFzidjXO09hLBBz FiutpYUCgYEA6OFikTrPlCMGMJjSj+R9woDAOPfvCDbVZWfNo8iupiECvei88W28 RYFnvUQRjSC0pHe//mfUSmiEaE+SjkNCdnNR+vsq9q+htfrADm84jl1mfeWatg/g zuGz2hAcZnux3kQMI7ufOwZNNpM2bf5B4yKamvG8tZRRxSkkAL1NV48CgYEA08/Z Ty9g9XPKoLnUWStDh1zwG+c0q14l2giegxzaUAG5DOgOXbXcw0VQ++uOWD5ARELG g9wZcbBsXxJrRpUqx+GAlv2Y1bkgiPQS1JIyhsWEUtwfAC/G+uZhCX53aI3Pbsjh QmkPCSp5DuOuW2PybMaw+wVe+CaI/gwAWMYDAasCgYEA4Fzkvc7PVoU33XIeywr0 LoQkrb4QyPUrOvt7H6SkvuFm5thn0KJMlRpLfAksb69m2l2U1+HooZd4mZawN+eN DNmlzgxWJDypq83dYwq8jkxmBj1DhMxfZnIE+L403nelseIVYAfPLOqxUTcbZXVk vRQFp+nmSXqQHUe5rAy1ivkCgYEAqLu7cclchCxqDv/6mc5NTVhMLu5QlvO5U6fq HqitgW7d69oxF5X499YQXZ+ZFdMBf19ypTiBTIAu1M3nh6LtIa4SsjXzus5vjKpj FdQhTBus/hU83Pkymk1MoDOPDEtsI+UDDdSDldmv9pyKGWPVi7H86vusXCLWnwsQ e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap 6S1ziw+XWfdQ83VIUOCL5DrwmcYzLIogS0agmnx/monfDx0Nl9+OZRxy6+AI9vkK 86A1+DXdo+IgX3grFK1l1gPhAZPRWJZ+anrEkyR4iLq6ZoPZ3BQn97U= -----END RSA PRIVATE KEY-----""" text = "Some text! %$*" def _ssh_decrypt_text(self, ssh_private_key, text): with utils.tempdir() as tmpdir: sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key')) with open(sshkey, 'w') as f: f.write(ssh_private_key) try: dec, _err = utils.execute('openssl', 'rsautl', '-decrypt', '-inkey', sshkey, process_input=text, binary=True) return dec except processutils.ProcessExecutionError as exc: raise exception.DecryptionFailure(reason=exc.stderr) def test_ssh_encrypt_decrypt_text(self): self._test_ssh_encrypt_decrypt_text(self.pubkey) key_with_spaces_in_comment = self.pubkey.replace('test@test', 'Generated by Nova') self._test_ssh_encrypt_decrypt_text(key_with_spaces_in_comment) def _test_ssh_encrypt_decrypt_text(self, key): enc = crypto.ssh_encrypt_text(self.pubkey, self.text) self.assertIsInstance(enc, bytes) # Comparison between bytes and str raises a TypeError # when using python3 -bb if six.PY2: self.assertNotEqual(enc, self.text) result = self._ssh_decrypt_text(self.prikey, enc) self.assertIsInstance(result, bytes) if six.PY3: result = result.decode('utf-8') self.assertEqual(result, self.text) def test_ssh_encrypt_failure(self): self.assertRaises(exception.EncryptionFailure, crypto.ssh_encrypt_text, '', self.text) class KeyPairTest(test.NoDBTestCase): rsa_prv = ( "-----BEGIN RSA PRIVATE KEY-----\n" "MIIEowIBAAKCAQEA5G44D6lEgMj6cRwCPydsMl1VRN2B9DVyV5lmwssGeJClywZM\n" "WcKlSZBaWPbwbt20/r74eMGZPlqtEi9Ro+EHj4/n5+3A2Mh11h0PGSt53PSPfWwo\n" "ZhEg9hQ1w1ZxfBMCx7eG2YdGFQocMgR0zQasJGjjt8hruCnWRB3pNH9DhEwKhgET\n" "H0/CFzxSh0eZWs/O4GSf4upwmRG/1Yu90vnVZq3AanwvvW5UBk6g4uWb6FTES867\n" "kAy4b5EcH6WR3lLE09omuG/NqtH+qkgIdQconDkmkuK3xf5go6GSwEod0erM1G1v\n" "e+C4w/MD98KZ4Zlon9hy7oE2rcqHXf58gZtOTQIDAQABAoIBAQCnkeM2Oemyv7xY\n" "dT+ArJ7GY4lFt2i5iOuUL0ge5Wid0R6OTNR9lDhEOszMLno6GhHIPrdvfjW4dDQ5\n" "/tRY757oRZzNmq+5V3R52V9WC3qeCBmq3EjWdwJDAphd72/YoOmNMKiPsphKntwI\n" "JRS5wodNPlSuYSwEMUypM3f7ttAEn5CASgYgribBDapm7EqkVa2AqSvpFzNvN3/e\n" "Sc36/XlxJin7AkKVOnRksuVOOj504VUQfXgVWZkfTeZqAROgA1FSnjUAffcubJmq\n" "pDL/JSgOqN4S+sJkkTrb19MuM9M/IdXteloynF+GUKZx6FdVQQc8xCiXgeupeeSD\n" "fNMAP7DRAoGBAP0JRFm3fCAavBREKVOyZm20DpeR6zMrVP7ht0SykkT/bw/kiRG+\n" "FH1tNioj9uyixt5SiKhH3ZVAunjsKvrwET8i3uz1M2Gk+ovWdLXurBogYNNWafjQ\n" "hRhFHpyExoZYRsn58bvYvjFXTO6JxuNS2b59DGBRkQ5mpsOhxarfbZnXAoGBAOcb\n" "K+qoPDeDicnQZ8+ygYYHxY3fy1nvm1F19jBiWd26bAUOHeZNPPKGvTSlrGWJgEyA\n" 
"FjZIlHJOY2s0dhukiytOiXzdA5iqK1NvlF+QTUI4tCeNMVejWC+n6sKR9ADZkX8D\n" "NOHaLkDzc/ukus59aKyjxP53I6SV6y6m5NeyvDx7AoGAaUji1MXA8wbMvU4DOB0h\n" "+4GRFMYVbEwaaJd4jzASJn12M9GuquBBXFMF15DxXFL6lmUXEZYdf83YCRqTY6hi\n" "NLgIs+XuxDFGQssv8sdletWAFE9/dpUk3A1eiFfC1wGCKuZCDBxKPvOJQjO3uryt\n" "d1JGxQkLZ0eVGg+E1O10iC8CgYB4w2QRfNPqllu8D6EPkVHJfeonltgmKOTajm+V\n" "HO+kw7OKeLP7EkVU3j+kcSZC8LUQRKZWu1qG2Jtu+7zz+OmYObPygXNNpS56rQW1\n" "Yixc/FB3knpEN2DvlilAfxAoGYjD/CL4GhCtdAoZZx0Opc262OEpr4v6hzSb7i4K\n" "4KUoXQKBgHfbiaSilxx9guUqvSaexpHmtiUwx05a05fD6tu8Cofl6AM9wGpw3xOT\n" "tfo4ehvS13tTz2RDE2xKuetMmkya7UgifcxUmBzqkOlgr0oOi2rp+eDKXnzUUqsH\n" "V7E96Dj36K8q2+gZIXcNqjN7PzfkF8pA0G+E1veTi8j5dnvIsy1x\n" "-----END RSA PRIVATE KEY-----\n" ) rsa_pub = ( "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkbjgPqUSAyPpxHAI/J2wyXVVE" "3YH0NXJXmWbCywZ4kKXLBkxZwqVJkFpY9vBu3bT+vvh4wZk+Wq0SL1Gj4QePj+fn" "7cDYyHXWHQ8ZK3nc9I99bChmESD2FDXDVnF8EwLHt4bZh0YVChwyBHTNBqwkaOO3" "yGu4KdZEHek0f0OETAqGARMfT8IXPFKHR5laz87gZJ/i6nCZEb/Vi73S+dVmrcBq" "fC+9blQGTqDi5ZvoVMRLzruQDLhvkRwfpZHeUsTT2ia4b82q0f6qSAh1ByicOSaS" "4rfF/mCjoZLASh3R6szUbW974LjD8wP3wpnhmWif2HLugTatyodd/nyBm05N Gen" "erated-by-Nova" ) rsa_fp = "e7:66:a1:2c:4f:90:6e:11:19:da:ac:c2:69:e1:ad:89" dss_pub = ( "ssh-dss AAAAB3NzaC1kc3MAAACBAKWFW2++pDxJWObkADbSXw8KfZ4VupkRKEXF" "SPN2kV0v+FgdnBEcrEJPExaOTMhmxIuc82ktTv76wHSEpbbsLuI7IDbB6KJJwHs2" "y356yB28Q9rin7X0VMYKkPxvAcbIUSrEbQtyPMihlOaaQ2dGSsEQGQSpjm3f3RU6" "OWux0w/NAAAAFQCgzWF2zxQmi/Obd11z9Im6gY02gwAAAIAHCDLjipVwMLXIqNKO" "MktiPex+ewRQxBi80dzZ3mJzARqzLPYI9hJFUU0LiMtLuypV/djpUWN0cQpmgTQf" "TfuZx9ipC6Mtiz66NQqjkQuoihzdk+9KlOTo03UsX5uBGwuZ09Dnf1VTF8ZsW5Hg" "HyOk6qD71QBajkcFJAKOT3rFfgAAAIAy8trIzqEps9/n37Nli1TvNPLbFQAXl1LN" "wUFmFDwBCGTLl8puVZv7VSu1FG8ko+mzqNebqcN4RMC26NxJqe+RRubn5KtmLoIa" "7tRe74hvQ1HTLLuGxugwa4CewNbwzzEDEs8U79WDhGKzDkJR4nLPVimj5WLAWV70" "RNnRX7zj5w== Generated-by-Nova" ) dss_fp = "b9:dc:ac:57:df:2a:2b:cf:65:a8:c3:4e:9d:4a:82:3c" ecdsa_pub = ( "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAy" "NTYAAABBBG1r4wzPTIjSo78POCq+u/czb8gYK0KvqlmCvcRPrnDWxgLw7y6BX51t" "uYREz7iLRCP7BwUt8R+ZWzFZDeOLIWU= Generated-by-Nova" ) ecdsa_pub_with_spaces = ( "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAy" "NTYAAABBBG1r4wzPTIjSo78POCq+u/czb8gYK0KvqlmCvcRPrnDWxgLw7y6BX51t" "uYREz7iLRCP7BwUt8R+ZWzFZDeOLIWU= Generated by Nova" ) ecdsa_fp = "16:6a:c9:ec:80:4d:17:3e:d5:3b:6f:c0:d7:15:04:40" def test_generate_fingerprint(self): fingerprint = crypto.generate_fingerprint(self.rsa_pub) self.assertEqual(self.rsa_fp, fingerprint) fingerprint = crypto.generate_fingerprint(self.dss_pub) self.assertEqual(self.dss_fp, fingerprint) fingerprint = crypto.generate_fingerprint(self.ecdsa_pub) self.assertEqual(self.ecdsa_fp, fingerprint) fingerprint = crypto.generate_fingerprint(self.ecdsa_pub_with_spaces) self.assertEqual(self.ecdsa_fp, fingerprint) def test_generate_key_pair_2048_bits(self): (private_key, public_key, fingerprint) = crypto.generate_key_pair() pub_bytes = public_key.encode('utf-8') pkey = serialization.load_ssh_public_key( pub_bytes, backends.default_backend()) self.assertEqual(2048, pkey.key_size) def test_generate_key_pair_1024_bits(self): bits = 1024 (private_key, public_key, fingerprint) = crypto.generate_key_pair(bits) pub_bytes = public_key.encode('utf-8') pkey = serialization.load_ssh_public_key( pub_bytes, backends.default_backend()) self.assertEqual(bits, pkey.key_size) def test_generate_key_pair_mocked_private_key(self): keyin = six.StringIO() keyin.write(self.rsa_prv) keyin.seek(0) key = 
paramiko.RSAKey.from_private_key(keyin) with mock.patch.object(crypto, 'generate_key') as mock_generate: mock_generate.return_value = key (private_key, public_key, fingerprint) = crypto.generate_key_pair() self.assertEqual(self.rsa_pub, public_key) self.assertEqual(self.rsa_fp, fingerprint) nova-13.1.4/nova/tests/unit/fake_policy.py0000664000567000056710000004305613064447152021645 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. policy_data = """ { "admin_api": "is_admin:True", "cells_scheduler_filter:TargetCellFilter": "is_admin:True", "context_is_admin": "role:admin or role:administrator", "compute:create": "", "compute:create:attach_network": "", "compute:create:attach_volume": "", "compute:get": "", "compute:get_all": "", "compute:get_all_tenants": "", "compute:update": "", "compute:get_instance_metadata": "", "compute:get_all_instance_metadata": "", "compute:get_all_instance_system_metadata": "", "compute:update_instance_metadata": "", "compute:delete_instance_metadata": "", "compute:get_diagnostics": "", "compute:get_instance_diagnostics": "", "compute:lock": "", "compute:unlock": "", "compute:unlock_override": "is_admin:True", "compute:get_vnc_console": "", "compute:get_spice_console": "", "compute:get_rdp_console": "", "compute:get_serial_console": "", "compute:get_mks_console": "", "compute:get_console_output": "", "compute:reset_network": "", "compute:inject_network_info": "", "compute:add_fixed_ip": "", "compute:remove_fixed_ip": "", "compute:attach_volume": "", "compute:detach_volume": "", "compute:attach_interface": "", "compute:detach_interface": "", "compute:set_admin_password": "", "compute:rescue": "", "compute:unrescue": "", "compute:suspend": "", "compute:resume": "", "compute:pause": "", "compute:unpause": "", "compute:start": "", "compute:stop": "", "compute:resize": "", "compute:confirm_resize": "", "compute:revert_resize": "", "compute:rebuild": "", "compute:reboot": "", "compute:snapshot": "", "compute:snapshot_volume_backed": "", "compute:backup": "", "compute:shelve": "", "compute:shelve_offload": "", "compute:unshelve": "", "compute:security_groups:add_to_instance": "", "compute:security_groups:remove_from_instance": "", "compute:delete": "", "compute:soft_delete": "", "compute:force_delete": "", "compute:restore": "", "compute:swap_volume": "", "compute:volume_snapshot_create": "", "compute:volume_snapshot_delete": "", "os_compute_api:servers:confirm_resize": "", "os_compute_api:servers:create": "", "os_compute_api:servers:create:attach_network": "", "os_compute_api:servers:create:attach_volume": "", "os_compute_api:servers:create:forced_host": "", "os_compute_api:servers:delete": "", "os_compute_api:servers:detail": "", "os_compute_api:servers:detail:get_all_tenants": "", "os_compute_api:servers:index": "", "os_compute_api:servers:index:get_all_tenants": "", "os_compute_api:servers:reboot": "", "os_compute_api:servers:rebuild": "", "os_compute_api:servers:resize": "", 
"os_compute_api:servers:revert_resize": "", "os_compute_api:servers:show": "", "os_compute_api:servers:show:host_status": "", "os_compute_api:servers:create_image": "", "os_compute_api:servers:create_image:allow_volume_backed": "", "os_compute_api:servers:update": "", "os_compute_api:servers:start": "", "os_compute_api:servers:stop": "", "os_compute_api:servers:trigger_crash_dump": "", "os_compute_api:servers:migrations:delete": "rule:admin_api", "os_compute_api:servers:migrations:force_complete": "", "os_compute_api:servers:migrations:index": "rule:admin_api", "os_compute_api:servers:migrations:show": "rule:admin_api", "os_compute_api:os-access-ips": "", "compute_extension:accounts": "", "compute_extension:admin_actions:pause": "", "compute_extension:admin_actions:unpause": "", "compute_extension:admin_actions:suspend": "", "compute_extension:admin_actions:resume": "", "compute_extension:admin_actions:lock": "", "compute_extension:admin_actions:unlock": "", "compute_extension:admin_actions:resetNetwork": "", "compute_extension:admin_actions:injectNetworkInfo": "", "compute_extension:admin_actions:createBackup": "", "compute_extension:admin_actions:migrateLive": "", "compute_extension:admin_actions:resetState": "", "compute_extension:admin_actions:migrate": "", "os_compute_api:os-admin-actions:reset_network": "", "os_compute_api:os-admin-actions:inject_network_info": "", "os_compute_api:os-admin-actions:reset_state": "", "os_compute_api:os-admin-password": "", "compute_extension:aggregates": "rule:admin_api", "os_compute_api:os-aggregates:index": "rule:admin_api", "os_compute_api:os-aggregates:create": "rule:admin_api", "os_compute_api:os-aggregates:show": "rule:admin_api", "os_compute_api:os-aggregates:update": "rule:admin_api", "os_compute_api:os-aggregates:delete": "rule:admin_api", "os_compute_api:os-aggregates:add_host": "rule:admin_api", "os_compute_api:os-aggregates:remove_host": "rule:admin_api", "os_compute_api:os-aggregates:set_metadata": "rule:admin_api", "compute_extension:agents": "", "os_compute_api:os-agents": "", "compute_extension:attach_interfaces": "", "os_compute_api:os-attach-interfaces": "", "compute_extension:baremetal_nodes": "", "os_compute_api:os-baremetal-nodes": "", "compute_extension:cells": "", "compute_extension:cells:create": "rule:admin_api", "compute_extension:cells:delete": "rule:admin_api", "compute_extension:cells:update": "rule:admin_api", "compute_extension:cells:sync_instances": "rule:admin_api", "os_compute_api:os-cells": "", "os_compute_api:os-cells:create": "rule:admin_api", "os_compute_api:os-cells:delete": "rule:admin_api", "os_compute_api:os-cells:update": "rule:admin_api", "os_compute_api:os-cells:sync_instances": "rule:admin_api", "compute_extension:certificates": "", "os_compute_api:os-certificates:create": "", "os_compute_api:os-certificates:show": "", "compute_extension:cloudpipe": "", "os_compute_api:os-cloudpipe": "", "compute_extension:cloudpipe_update": "", "compute_extension:config_drive": "", "os_compute_api:os-config-drive": "", "compute_extension:console_output": "", "os_compute_api:os-console-output": "", "compute_extension:consoles": "", "os_compute_api:os-remote-consoles": "", "os_compute_api:os-consoles:create": "", "os_compute_api:os-consoles:delete": "", "os_compute_api:os-consoles:index": "", "os_compute_api:os-consoles:show": "", "compute_extension:createserverext": "", "os_compute_api:os-create-backup": "", "compute_extension:deferred_delete": "", "os_compute_api:os-deferred-delete": "", "compute_extension:disk_config": 
"", "os_compute_api:os-disk-config": "", "compute_extension:evacuate": "is_admin:True", "os_compute_api:os-evacuate": "is_admin:True", "compute_extension:extended_server_attributes": "", "os_compute_api:os-extended-server-attributes": "", "compute_extension:extended_status": "", "os_compute_api:os-extended-status": "", "compute_extension:extended_availability_zone": "", "os_compute_api:os-extended-availability-zone": "", "compute_extension:extended_ips": "", "compute_extension:extended_ips_mac": "", "compute_extension:extended_vif_net": "", "compute_extension:extended_volumes": "", "os_compute_api:ips:index": "", "os_compute_api:ips:show": "", "os_compute_api:os-extended-volumes": "", "os_compute_api:extensions": "", "compute_extension:fixed_ips": "", "os_compute_api:os-fixed-ips": "", "compute_extension:flavor_access": "", "compute_extension:flavor_access:addTenantAccess": "", "compute_extension:flavor_access:removeTenantAccess": "", "os_compute_api:os-flavor-access": "", "os_compute_api:os-flavor-access:remove_tenant_access": "", "os_compute_api:os-flavor-access:add_tenant_access": "", "compute_extension:flavor_disabled": "", "compute_extension:flavor_rxtx": "", "os_compute_api:os-flavor-rxtx": "", "compute_extension:flavor_swap": "", "compute_extension:flavorextradata": "", "compute_extension:flavorextraspecs:index": "", "compute_extension:flavorextraspecs:show": "", "compute_extension:flavorextraspecs:create": "is_admin:True", "compute_extension:flavorextraspecs:update": "is_admin:True", "compute_extension:flavorextraspecs:delete": "is_admin:True", "os_compute_api:os-flavor-extra-specs:index": "", "os_compute_api:os-flavor-extra-specs:show": "", "os_compute_api:os-flavor-extra-specs:create": "is_admin:True", "os_compute_api:os-flavor-extra-specs:update": "is_admin:True", "os_compute_api:os-flavor-extra-specs:delete": "is_admin:True", "compute_extension:flavormanage": "", "os_compute_api:os-flavor-manage": "", "compute_extension:floating_ip_dns": "", "os_compute_api:os-floating-ip-dns": "", "os_compute_api:os-floating-ip-dns:domain:update": "", "os_compute_api:os-floating-ip-dns:domain:delete": "", "compute_extension:floating_ip_pools": "", "os_compute_api:os-floating-ip-pools": "", "compute_extension:floating_ips": "", "os_compute_api:os-floating-ips": "", "compute_extension:floating_ips_bulk": "", "os_compute_api:os-floating-ips-bulk": "", "compute_extension:fping": "", "compute_extension:fping:all_tenants": "is_admin:True", "os_compute_api:os-fping": "", "os_compute_api:os-fping:all_tenants": "is_admin:True", "compute_extension:hide_server_addresses": "", "os_compute_api:os-hide-server-addresses": "", "compute_extension:hosts": "", "os_compute_api:os-hosts": "rule:admin_api", "compute_extension:hypervisors": "rule:admin_api", "os_compute_api:os-hypervisors": "rule:admin_api", "compute_extension:image_size": "", "os_compute_api:image-size": "", "compute_extension:instance_actions": "", "os_compute_api:os-instance-actions": "", "compute_extension:instance_actions:events": "is_admin:True", "os_compute_api:os-instance-actions:events": "is_admin:True", "compute_extension:instance_usage_audit_log": "rule:admin_api", "os_compute_api:os-instance-usage-audit-log": "", "compute_extension:keypairs": "", "compute_extension:keypairs:index": "", "compute_extension:keypairs:show": "", "compute_extension:keypairs:create": "", "compute_extension:keypairs:delete": "", "os_compute_api:os-keypairs": "", "os_compute_api:os-keypairs:index": "rule:admin_api or user_id:%(user_id)s", 
"os_compute_api:os-keypairs:show": "rule:admin_api or user_id:%(user_id)s", "os_compute_api:os-keypairs:create": "rule:admin_api or user_id:%(user_id)s", "os_compute_api:os-keypairs:delete": "rule:admin_api or user_id:%(user_id)s", "os_compute_api:os-lock-server:lock": "", "os_compute_api:os-lock-server:unlock": "", "os_compute_api:os-lock-server:unlock:unlock_override": "", "os_compute_api:os-migrate-server:migrate": "", "os_compute_api:os-migrate-server:migrate_live": "", "compute_extension:multinic": "", "os_compute_api:os-multinic": "", "compute_extension:networks": "", "compute_extension:networks:view": "", "os_compute_api:os-networks": "", "os_compute_api:os-networks:view": "", "compute_extension:networks_associate": "", "os_compute_api:os-networks-associate": "", "compute_extension:os-tenant-networks": "", "os_compute_api:os-tenant-networks": "", "os_compute_api:os-pause-server:pause": "", "os_compute_api:os-pause-server:unpause": "", "os_compute_api:os-pci:pci_servers": "", "os_compute_api:os-pci:index": "", "os_compute_api:os-pci:detail": "", "os_compute_api:os-pci:show": "", "compute_extension:quotas:show": "", "compute_extension:quotas:update": "", "compute_extension:quotas:delete": "", "os_compute_api:os-quota-sets:show": "", "os_compute_api:os-quota-sets:update": "", "os_compute_api:os-quota-sets:delete": "", "os_compute_api:os-quota-sets:detail": "", "os_compute_api:os-quota-sets:defaults": "", "compute_extension:quota_classes": "", "os_compute_api:os-quota-class-sets:update": "", "os_compute_api:os-quota-class-sets:show": "", "compute_extension:rescue": "", "os_compute_api:os-rescue": "", "compute_extension:security_group_default_rules": "", "os_compute_api:os-security-group-default-rules": "", "compute_extension:security_groups": "", "os_compute_api:os-security-groups": "", "compute_extension:server_diagnostics": "", "os_compute_api:os-server-diagnostics": "", "compute_extension:server_groups": "", "compute_extension:server_password": "", "os_compute_api:os-server-password": "", "compute_extension:server_usage": "", "os_compute_api:os-server-usage": "", "os_compute_api:os-server-groups": "", "compute_extension:services": "", "os_compute_api:os-services": "", "compute_extension:shelve": "", "compute_extension:shelveOffload": "", "os_compute_api:os-shelve:shelve": "", "os_compute_api:os-shelve:shelve_offload": "", "compute_extension:simple_tenant_usage:show": "", "compute_extension:simple_tenant_usage:list": "", "os_compute_api:os-simple-tenant-usage:show": "", "os_compute_api:os-simple-tenant-usage:list": "", "compute_extension:unshelve": "", "os_compute_api:os-shelve:unshelve": "", "os_compute_api:os-suspend-server:suspend": "", "os_compute_api:os-suspend-server:resume": "", "compute_extension:users": "", "compute_extension:virtual_interfaces": "", "os_compute_api:os-virtual-interfaces": "", "compute_extension:virtual_storage_arrays": "", "compute_extension:volumes": "", "compute_extension:volume_attachments:index": "", "compute_extension:volume_attachments:show": "", "compute_extension:volume_attachments:create": "", "compute_extension:volume_attachments:update": "", "compute_extension:volume_attachments:delete": "", "os_compute_api:os-volumes": "", "os_compute_api:os-volumes-attachments:index": "", "os_compute_api:os-volumes-attachments:show": "", "os_compute_api:os-volumes-attachments:create": "", "os_compute_api:os-volumes-attachments:update": "", "os_compute_api:os-volumes-attachments:delete": "", "compute_extension:volumetypes": "", 
"compute_extension:availability_zone:list": "", "os_compute_api:os-availability-zone:list": "", "compute_extension:availability_zone:detail": "", "os_compute_api:os-availability-zone:detail": "", "compute_extension:used_limits_for_admin": "is_admin:True", "os_compute_api:os-used-limits": "is_admin:True", "os_compute_api:limits": "", "compute_extension:migrations:index": "is_admin:True", "os_compute_api:os-migrations:index": "is_admin:True", "compute_extension:os-assisted-volume-snapshots:create": "", "compute_extension:os-assisted-volume-snapshots:delete": "", "os_compute_api:os-assisted-volume-snapshots:create": "", "os_compute_api:os-assisted-volume-snapshots:delete": "", "compute_extension:console_auth_tokens": "is_admin:True", "os_compute_api:os-console-auth-tokens": "is_admin:True", "compute_extension:os-server-external-events:create": "rule:admin_api", "os_compute_api:os-server-external-events:create": "rule:admin_api", "os_compute_api:server-metadata:create": "", "os_compute_api:server-metadata:update": "", "os_compute_api:server-metadata:update_all": "", "os_compute_api:server-metadata:delete": "", "os_compute_api:server-metadata:show": "", "os_compute_api:server-metadata:index": "", "network:get_all": "", "network:get": "", "network:create": "", "network:delete": "", "network:associate": "", "network:disassociate": "", "network:get_vifs_by_instance": "", "network:get_vif_by_mac_address": "", "network:allocate_for_instance": "", "network:deallocate_for_instance": "", "network:validate_networks": "", "network:get_instance_uuids_by_ip_filter": "", "network:get_instance_id_by_floating_address": "", "network:setup_networks_on_host": "", "network:get_floating_ip": "", "network:get_floating_ip_pools": "", "network:get_floating_ip_by_address": "", "network:get_floating_ips_by_project": "", "network:get_floating_ips_by_fixed_address": "", "network:allocate_floating_ip": "", "network:associate_floating_ip": "", "network:disassociate_floating_ip": "", "network:release_floating_ip": "", "network:migrate_instance_start": "", "network:migrate_instance_finish": "", "network:get_fixed_ip": "", "network:get_fixed_ip_by_address": "", "network:add_fixed_ip_to_instance": "", "network:remove_fixed_ip_from_instance": "", "network:add_network_to_project": "", "network:get_instance_nw_info": "", "network:get_dns_domains": "", "network:add_dns_entry": "", "network:modify_dns_entry": "", "network:delete_dns_entry": "", "network:get_dns_entries_by_address": "", "network:get_dns_entries_by_name": "", "network:create_private_dns_domain": "", "network:create_public_dns_domain": "", "network:delete_dns_domain": "", "network:attach_external_network": "rule:admin_api" } """ nova-13.1.4/nova/tests/unit/test_configdrive2.py0000664000567000056710000001116213064447152022771 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os
import tempfile

import mock
from mox3 import mox
from oslo_config import cfg
from oslo_utils import fileutils

from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
from nova.virt import configdrive

CONF = cfg.CONF


class FakeInstanceMD(object):
    def metadata_for_config_drive(self):
        yield ('this/is/a/path/hello', 'This is some content')


class ConfigDriveTestCase(test.NoDBTestCase):

    def test_create_configdrive_iso(self):
        CONF.set_override('config_drive_format', 'iso9660')
        imagefile = None
        try:
            self.mox.StubOutWithMock(utils, 'execute')
            utils.execute('genisoimage', '-o', mox.IgnoreArg(),
                          '-ldots', '-allow-lowercase', '-allow-multidot',
                          '-l', '-publisher', mox.IgnoreArg(), '-quiet',
                          '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
                          attempts=1, run_as_root=False).AndReturn(None)
            self.mox.ReplayAll()

            with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
                (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
                os.close(fd)
                c.make_drive(imagefile)
        finally:
            if imagefile:
                fileutils.delete_if_exists(imagefile)

    def test_create_configdrive_vfat(self):
        CONF.set_override('config_drive_format', 'vfat')
        imagefile = None
        try:
            self.mox.StubOutWithMock(utils, 'mkfs')
            self.mox.StubOutWithMock(utils, 'execute')
            self.mox.StubOutWithMock(utils, 'trycmd')
            utils.mkfs('vfat', mox.IgnoreArg(),
                       label='config-2').AndReturn(None)
            utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
                         mox.IgnoreArg(),
                         run_as_root=True).AndReturn((None, None))
            utils.execute('umount', mox.IgnoreArg(),
                          run_as_root=True).AndReturn(None)
            self.mox.ReplayAll()

            with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
                (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
                os.close(fd)
                c.make_drive(imagefile)
            # NOTE(mikal): we can't check for a VFAT output here because the
            # filesystem creation stuff has been mocked out because it
            # requires root permissions
        finally:
            if imagefile:
                fileutils.delete_if_exists(imagefile)

    def test_config_drive_required_by_image_property(self):
        inst = fake_instance.fake_instance_obj(context.get_admin_context())
        inst.config_drive = ''
        inst.system_metadata = {
            utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'mandatory'}
        self.assertTrue(configdrive.required_by(inst))

        inst.system_metadata = {
            utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'optional'}
        self.assertFalse(configdrive.required_by(inst))

    @mock.patch.object(configdrive, 'required_by', return_value=False)
    def test_config_drive_update_instance_required_by_false(
            self, mock_required):
        inst = fake_instance.fake_instance_obj(context.get_admin_context())
        inst.config_drive = ''
        configdrive.update_instance(inst)
        self.assertEqual('', inst.config_drive)

        inst.config_drive = True
        configdrive.update_instance(inst)
        self.assertTrue(inst.config_drive)

    @mock.patch.object(configdrive, 'required_by', return_value=True)
    def test_config_drive_update_instance(self, mock_required):
        inst = fake_instance.fake_instance_obj(context.get_admin_context())
        inst.config_drive = ''
        configdrive.update_instance(inst)
        self.assertTrue(inst.config_drive)

        inst.config_drive = True
        configdrive.update_instance(inst)
        self.assertTrue(inst.config_drive)
nova-13.1.4/nova/tests/unit/cells/0000775000567000056710000000000013064447471020104 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/cells/test_cells_filters.py0000664000567000056710000002264613064447152024355 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for cells scheduler filters. """ from nova.cells import filters from nova.cells import state from nova import context from nova.db.sqlalchemy import models from nova import test from nova.tests.unit.cells import fakes class FiltersTestCase(test.NoDBTestCase): """Makes sure the proper filters are in the directory.""" def test_all_filters(self): filter_classes = filters.all_filters() class_names = [cls.__name__ for cls in filter_classes] self.assertIn("TargetCellFilter", class_names) self.assertIn("DifferentCellFilter", class_names) class _FilterTestClass(test.NoDBTestCase): """Base class for testing individual filter plugins.""" filter_cls_name = None def setUp(self): super(_FilterTestClass, self).setUp() fakes.init(self) self.msg_runner = fakes.get_message_runner('api-cell') self.scheduler = self.msg_runner.scheduler self.my_cell_state = self.msg_runner.state_manager.get_my_state() self.filter_handler = filters.CellFilterHandler() filter_classes = self.filter_handler.get_matching_classes( [self.filter_cls_name]) self.filters = [cls() for cls in filter_classes] self.context = context.RequestContext('fake', 'fake', is_admin=True) def _filter_cells(self, cells, filter_properties): return self.filter_handler.get_filtered_objects(self.filters, cells, filter_properties) class ImagePropertiesFilter(_FilterTestClass): filter_cls_name = \ 'nova.cells.filters.image_properties.ImagePropertiesFilter' def setUp(self): super(ImagePropertiesFilter, self).setUp() self.cell1 = models.Cell() self.cell2 = models.Cell() self.cell3 = models.Cell() self.cells = [self.cell1, self.cell2, self.cell3] for cell in self.cells: cell.capabilities = {} self.filter_props = {'context': self.context, 'request_spec': {}} def test_missing_image_properties(self): self.assertEqual(self.cells, self._filter_cells(self.cells, self.filter_props)) def test_missing_hypervisor_version_requires(self): self.filter_props['request_spec'] = {'image': {'properties': {}}} for cell in self.cells: cell.capabilities = {"prominent_hypervisor_version": set([u"6.2"])} self.assertEqual(self.cells, self._filter_cells(self.cells, self.filter_props)) def test_missing_hypervisor_version_in_cells(self): image = {'properties': {'hypervisor_version_requires': '>6.2.1'}} self.filter_props['request_spec'] = {'image': image} self.cell1.capabilities = {"prominent_hypervisor_version": set([])} self.assertEqual(self.cells, self._filter_cells(self.cells, self.filter_props)) def test_cells_matching_hypervisor_version(self): image = {'properties': {'hypervisor_version_requires': '>6.0, <=6.3'}} self.filter_props['request_spec'] = {'image': image} self.cell1.capabilities = {"prominent_hypervisor_version": set([u"6.2"])} self.cell2.capabilities = {"prominent_hypervisor_version": set([u"6.3"])} self.cell3.capabilities = {"prominent_hypervisor_version": set([u"6.0"])} self.assertEqual([self.cell1, self.cell2], self._filter_cells(self.cells, self.filter_props)) # assert again to verify filter doesn't mutate state # LP bug 
#1325705 self.assertEqual([self.cell1, self.cell2], self._filter_cells(self.cells, self.filter_props)) class TestTargetCellFilter(_FilterTestClass): filter_cls_name = 'nova.cells.filters.target_cell.TargetCellFilter' def test_missing_scheduler_hints(self): cells = [1, 2, 3] # No filtering filter_props = {'context': self.context} self.assertEqual(cells, self._filter_cells(cells, filter_props)) def test_no_target_cell_hint(self): cells = [1, 2, 3] filter_props = {'scheduler_hints': {}, 'context': self.context} # No filtering self.assertEqual(cells, self._filter_cells(cells, filter_props)) def test_target_cell_specified_me(self): cells = [1, 2, 3] target_cell = 'fake!cell!path' current_cell = 'fake!cell!path' filter_props = {'scheduler_hints': {'target_cell': target_cell}, 'routing_path': current_cell, 'scheduler': self.scheduler, 'context': self.context} # Only myself in the list. self.assertEqual([self.my_cell_state], self._filter_cells(cells, filter_props)) def test_target_cell_specified_me_but_not_admin(self): ctxt = context.RequestContext('fake', 'fake') cells = [1, 2, 3] target_cell = 'fake!cell!path' current_cell = 'fake!cell!path' filter_props = {'scheduler_hints': {'target_cell': target_cell}, 'routing_path': current_cell, 'scheduler': self.scheduler, 'context': ctxt} # No filtering, because not an admin. self.assertEqual(cells, self._filter_cells(cells, filter_props)) def test_target_cell_specified_not_me(self): info = {} def _fake_build_instances(ctxt, cell, sched_kwargs): info['ctxt'] = ctxt info['cell'] = cell info['sched_kwargs'] = sched_kwargs self.stubs.Set(self.msg_runner, 'build_instances', _fake_build_instances) cells = [1, 2, 3] target_cell = 'fake!cell!path' current_cell = 'not!the!same' filter_props = {'scheduler_hints': {'target_cell': target_cell}, 'routing_path': current_cell, 'scheduler': self.scheduler, 'context': self.context, 'host_sched_kwargs': 'meow'} # None is returned to bypass further scheduling. self.assertIsNone(self._filter_cells(cells, filter_props)) # The filter should have re-scheduled to the child cell itself. 
expected_info = {'ctxt': self.context, 'cell': 'fake!cell!path', 'sched_kwargs': 'meow'} self.assertEqual(expected_info, info) class TestDifferentCellFilter(_FilterTestClass): filter_cls_name = 'nova.cells.filters.different_cell.DifferentCellFilter' def setUp(self): super(TestDifferentCellFilter, self).setUp() # We only load one filter so we know the first one is the one we want self.policy.set_rules({'cells_scheduler_filter:DifferentCellFilter': ''}) self.cells = [state.CellState('1'), state.CellState('2'), state.CellState('3')] def test_missing_scheduler_hints(self): filter_props = {'context': self.context} # No filtering self.assertEqual(self.cells, self._filter_cells(self.cells, filter_props)) def test_no_different_cell_hint(self): filter_props = {'scheduler_hints': {}, 'context': self.context} # No filtering self.assertEqual(self.cells, self._filter_cells(self.cells, filter_props)) def test_different_cell(self): filter_props = {'scheduler_hints': {'different_cell': 'fake!2'}, 'routing_path': 'fake', 'context': self.context} filtered_cells = self._filter_cells(self.cells, filter_props) self.assertEqual(2, len(filtered_cells)) self.assertNotIn(self.cells[1], filtered_cells) def test_different_multiple_cells(self): filter_props = {'scheduler_hints': {'different_cell': ['fake!1', 'fake!2']}, 'routing_path': 'fake', 'context': self.context} filtered_cells = self._filter_cells(self.cells, filter_props) self.assertEqual(1, len(filtered_cells)) self.assertNotIn(self.cells[0], filtered_cells) self.assertNotIn(self.cells[1], filtered_cells) def test_different_cell_specified_me_not_authorized(self): self.policy.set_rules({'cells_scheduler_filter:DifferentCellFilter': '!'}) filter_props = {'scheduler_hints': {'different_cell': 'fake!2'}, 'routing_path': 'fake', 'context': self.context} # No filtering, because not an admin. self.assertEqual(self.cells, self._filter_cells(self.cells, filter_props)) nova-13.1.4/nova/tests/unit/cells/test_cells_utils.py0000664000567000056710000002367513064447152024050 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
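# ---------------------------------------------------------------------------
# Editor's note -- an illustrative sketch, not part of the upstream file.
# The helpers exercised by test_split_cell_and_item below pair a cell path
# with an item (for example a host name) and split it back apart.  A minimal
# round-trip sketch using the same values as that test; example_round_trip
# is a hypothetical name.

from nova.cells import utils as cells_utils


def example_round_trip():
    cell = cells_utils.PATH_CELL_SEP.join(
        ('australia', 'queensland', 'gold_coast'))
    together = cells_utils.cell_with_item(cell, 'host_5')
    # split_cell_and_item() is the inverse of cell_with_item(), returning
    # the original (cell, item) pair, as test_split_cell_and_item asserts.
    return cells_utils.split_cell_and_item(together)
# ---------------------------------------------------------------------------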
""" Tests For Cells Utility methods """ import inspect import mock import random from nova.cells import utils as cells_utils from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance class CellsUtilsTestCase(test.NoDBTestCase): """Test case for Cells utility methods.""" def test_get_instances_to_sync(self): fake_context = 'fake_context' call_info = {'get_all': 0, 'shuffle': 0} def random_shuffle(_list): call_info['shuffle'] += 1 @staticmethod def instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit, marker): # Pretend we return a full list the first time otherwise we loop # infinitely if marker is not None: return [] self.assertEqual(fake_context, context) self.assertEqual('deleted', sort_key) self.assertEqual('asc', sort_dir) call_info['got_filters'] = filters call_info['get_all'] += 1 instances = [fake_instance.fake_db_instance() for i in range(3)] return instances self.stubs.Set(objects.InstanceList, 'get_by_filters', instance_get_all_by_filters) self.stubs.Set(random, 'shuffle', random_shuffle) instances = cells_utils.get_instances_to_sync(fake_context) self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(1, call_info['get_all']) self.assertEqual({}, call_info['got_filters']) self.assertEqual(0, call_info['shuffle']) instances = cells_utils.get_instances_to_sync(fake_context, shuffle=True) self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(2, call_info['get_all']) self.assertEqual({}, call_info['got_filters']) self.assertEqual(1, call_info['shuffle']) instances = cells_utils.get_instances_to_sync(fake_context, updated_since='fake-updated-since') self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(3, call_info['get_all']) self.assertEqual({'changes-since': 'fake-updated-since'}, call_info['got_filters']) self.assertEqual(1, call_info['shuffle']) instances = cells_utils.get_instances_to_sync(fake_context, project_id='fake-project', updated_since='fake-updated-since', shuffle=True) self.assertTrue(inspect.isgenerator(instances)) self.assertEqual(3, len([x for x in instances])) self.assertEqual(4, call_info['get_all']) self.assertEqual({'changes-since': 'fake-updated-since', 'project_id': 'fake-project'}, call_info['got_filters']) self.assertEqual(2, call_info['shuffle']) @mock.patch.object(objects.InstanceList, 'get_by_filters') @mock.patch.object(random, 'shuffle') def _test_get_instances_pagination(self, mock_shuffle, mock_get_by_filters, shuffle=False, updated_since=None, project_id=None): fake_context = 'fake_context' instances0 = objects.instance._make_instance_list(fake_context, objects.InstanceList(), [fake_instance.fake_db_instance() for i in range(3)], expected_attrs=None) marker0 = instances0[-1]['uuid'] instances1 = objects.instance._make_instance_list(fake_context, objects.InstanceList(), [fake_instance.fake_db_instance() for i in range(3)], expected_attrs=None) marker1 = instances1[-1]['uuid'] mock_get_by_filters.side_effect = [instances0, instances1, []] instances = cells_utils.get_instances_to_sync(fake_context, updated_since, project_id, shuffle=shuffle) self.assertEqual(len([x for x in instances]), 6) filters = {} if updated_since is not None: filters['changes-since'] = updated_since if project_id is not None: filters['project_id'] = project_id limit = 100 expected_calls = [mock.call(fake_context, filters, 
sort_key='deleted', sort_dir='asc', limit=limit, marker=None), mock.call(fake_context, filters, sort_key='deleted', sort_dir='asc', limit=limit, marker=marker0), mock.call(fake_context, filters, sort_key='deleted', sort_dir='asc', limit=limit, marker=marker1)] mock_get_by_filters.assert_has_calls(expected_calls) self.assertEqual(3, mock_get_by_filters.call_count) def test_get_instances_to_sync_limit(self): self._test_get_instances_pagination() def test_get_instances_to_sync_shuffle(self): self._test_get_instances_pagination(shuffle=True) def test_get_instances_to_sync_updated_since(self): self._test_get_instances_pagination(updated_since='fake-updated-since') def test_get_instances_to_sync_multiple_params(self): self._test_get_instances_pagination(project_id='fake-project', updated_since='fake-updated-since', shuffle=True) def test_split_cell_and_item(self): path = 'australia', 'queensland', 'gold_coast' cell = cells_utils.PATH_CELL_SEP.join(path) item = 'host_5' together = cells_utils.cell_with_item(cell, item) self.assertEqual(cells_utils._CELL_ITEM_SEP.join([cell, item]), together) # Test normal usage result_cell, result_item = cells_utils.split_cell_and_item(together) self.assertEqual(cell, result_cell) self.assertEqual(item, result_item) # Test with no cell cell = None together = cells_utils.cell_with_item(cell, item) self.assertEqual(item, together) result_cell, result_item = cells_utils.split_cell_and_item(together) self.assertEqual(cell, result_cell) self.assertEqual(item, result_item) def test_add_cell_to_compute_node(self): fake_compute = objects.ComputeNode(id=1, host='fake') cell_path = 'fake_path' proxy = cells_utils.add_cell_to_compute_node(fake_compute, cell_path) self.assertIsInstance(proxy, cells_utils.ComputeNodeProxy) self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id) self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'), proxy.host) @mock.patch.object(objects.Service, 'obj_load_attr') def test_add_cell_to_service_no_compute_node(self, mock_get_by_id): fake_service = objects.Service(id=1, host='fake') mock_get_by_id.side_effect = exception.ServiceNotFound(service_id=1) cell_path = 'fake_path' proxy = cells_utils.add_cell_to_service(fake_service, cell_path) self.assertIsInstance(proxy, cells_utils.ServiceProxy) self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id) self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'), proxy.host) self.assertRaises(AttributeError, getattr, proxy, 'compute_node') def test_add_cell_to_service_with_compute_node(self): fake_service = objects.Service(id=1, host='fake') fake_service.compute_node = objects.ComputeNode(id=1, host='fake') cell_path = 'fake_path' proxy = cells_utils.add_cell_to_service(fake_service, cell_path) self.assertIsInstance(proxy, cells_utils.ServiceProxy) self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id) self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'), proxy.host) self.assertRaises(AttributeError, getattr, proxy, 'compute_node') def test_proxy_object_serializer_to_primitive(self): obj = objects.ComputeNode(id=1, host='fake') obj_proxy = cells_utils.ComputeNodeProxy(obj, 'fake_path') serializer = cells_utils.ProxyObjectSerializer() primitive = serializer.serialize_entity('ctx', obj_proxy) self.assertIsInstance(primitive, dict) class_name = primitive.pop('cell_proxy.class_name') cell_path = primitive.pop('cell_proxy.cell_path') self.assertEqual('ComputeNodeProxy', class_name) self.assertEqual('fake_path', cell_path) 
self.assertEqual(obj.obj_to_primitive(), primitive) def test_proxy_object_serializer_from_primitive(self): obj = objects.ComputeNode(id=1, host='fake') serializer = cells_utils.ProxyObjectSerializer() # Recreating the primitive by hand to isolate the test for only # the deserializing method primitive = obj.obj_to_primitive() primitive['cell_proxy.class_name'] = 'ComputeNodeProxy' primitive['cell_proxy.cell_path'] = 'fake_path' result = serializer.deserialize_entity('ctx', primitive) self.assertIsInstance(result, cells_utils.ComputeNodeProxy) self.assertEqual(obj.obj_to_primitive(), result._obj.obj_to_primitive()) self.assertEqual('fake_path', result._cell_path) nova-13.1.4/nova/tests/unit/cells/test_cells_rpc_driver.py0000664000567000056710000002140313064447152025032 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Cells RPC Communication Driver """ import mock from mox3 import mox import oslo_messaging from nova.cells import messaging from nova.cells import rpc_driver import nova.conf from nova import context from nova import rpc from nova import test from nova.tests.unit.cells import fakes CONF = nova.conf.CONF class CellsRPCDriverTestCase(test.NoDBTestCase): """Test case for Cells communication via RPC.""" def setUp(self): super(CellsRPCDriverTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self.driver = rpc_driver.CellsRPCDriver() def test_start_servers(self): self.flags(rpc_driver_queue_base='cells.intercell42', group='cells') fake_msg_runner = fakes.get_message_runner('api-cell') class FakeInterCellRPCDispatcher(object): def __init__(_self, msg_runner): self.assertEqual(fake_msg_runner, msg_runner) self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher', FakeInterCellRPCDispatcher) self.mox.StubOutWithMock(rpc, 'get_server') for message_type in messaging.MessageRunner.get_message_types(): topic = 'cells.intercell42.' 
+ message_type target = oslo_messaging.Target(topic=topic, server=CONF.host) endpoints = [mox.IsA(FakeInterCellRPCDispatcher)] rpcserver = self.mox.CreateMockAnything() rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver) rpcserver.start() self.mox.ReplayAll() self.driver.start_servers(fake_msg_runner) def test_stop_servers(self): call_info = {'stopped': []} class FakeRPCServer(object): def stop(self): call_info['stopped'].append(self) fake_servers = [FakeRPCServer() for x in range(5)] self.driver.rpc_servers = fake_servers self.driver.stop_servers() self.assertEqual(fake_servers, call_info['stopped']) def test_create_transport_once(self): # should only construct each Transport once rpcapi = self.driver.intercell_rpcapi transport_url = 'amqp://fakeurl' next_hop = fakes.FakeCellState('cellname') next_hop.db_info['transport_url'] = transport_url # first call to _get_transport creates a oslo.messaging.Transport obj with mock.patch.object(oslo_messaging, 'get_transport') as get_trans: transport = rpcapi._get_transport(next_hop) get_trans.assert_called_once_with(rpc_driver.CONF, transport_url, rpc.TRANSPORT_ALIASES) self.assertIn(transport_url, rpcapi.transports) self.assertEqual(transport, rpcapi.transports[transport_url]) # subsequent calls should return the pre-created Transport obj transport2 = rpcapi._get_transport(next_hop) self.assertEqual(transport, transport2) def test_send_message_to_cell_cast(self): msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._TargetedMessage(msg_runner, self.ctxt, 'fake', {}, 'down', cell_state, fanout=False) expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} expected_url = ('rabbit://%(username)s:%(password)s@' '%(hostname)s:%(port)d/%(virtual_host)s' % expected_server_params) def check_transport_url(cell_state): return cell_state.db_info['transport_url'] == expected_url rpcapi = self.driver.intercell_rpcapi rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpcapi, '_get_client') rpcapi._get_client( mox.Func(check_transport_url), 'cells.intercell.targeted').AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'process_message', message=message.to_json()) self.mox.ReplayAll() self.driver.send_message_to_cell(cell_state, message) def test_send_message_to_cell_fanout_cast(self): msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._TargetedMessage(msg_runner, self.ctxt, 'fake', {}, 'down', cell_state, fanout=True) expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} expected_url = ('rabbit://%(username)s:%(password)s@' '%(hostname)s:%(port)d/%(virtual_host)s' % expected_server_params) def check_transport_url(cell_state): return cell_state.db_info['transport_url'] == expected_url rpcapi = self.driver.intercell_rpcapi rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpcapi, '_get_client') rpcapi._get_client( mox.Func(check_transport_url), 'cells.intercell.targeted').AndReturn(rpcclient) rpcclient.prepare(fanout=True).AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'process_message', message=message.to_json()) self.mox.ReplayAll() self.driver.send_message_to_cell(cell_state, message) def test_rpc_topic_uses_message_type(self): 
self.flags(rpc_driver_queue_base='cells.intercell42', group='cells') msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._BroadcastMessage(msg_runner, self.ctxt, 'fake', {}, 'down', fanout=True) message.message_type = 'fake-message-type' expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} expected_url = ('rabbit://%(username)s:%(password)s@' '%(hostname)s:%(port)d/%(virtual_host)s' % expected_server_params) def check_transport_url(cell_state): return cell_state.db_info['transport_url'] == expected_url rpcapi = self.driver.intercell_rpcapi rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpcapi, '_get_client') rpcapi._get_client( mox.Func(check_transport_url), 'cells.intercell42.fake-message-type').AndReturn(rpcclient) rpcclient.prepare(fanout=True).AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'process_message', message=message.to_json()) self.mox.ReplayAll() self.driver.send_message_to_cell(cell_state, message) def test_process_message(self): msg_runner = fakes.get_message_runner('api-cell') dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner) message = messaging._BroadcastMessage(msg_runner, self.ctxt, 'fake', {}, 'down', fanout=True) call_info = {} def _fake_message_from_json(json_message): call_info['json_message'] = json_message self.assertEqual(message.to_json(), json_message) return message def _fake_process(): call_info['process_called'] = True self.stubs.Set(msg_runner, 'message_from_json', _fake_message_from_json) self.stubs.Set(message, 'process', _fake_process) dispatcher.process_message(self.ctxt, message.to_json()) self.assertEqual(message.to_json(), call_info['json_message']) self.assertTrue(call_info['process_called']) nova-13.1.4/nova/tests/unit/cells/__init__.py0000664000567000056710000000000013064447140022174 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/cells/test_cells_messaging.py0000664000567000056710000026652013064447152024663 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
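# ---------------------------------------------------------------------------
# Editor's note -- an illustrative sketch, not part of the upstream file.
# test_reverse_path and test_response_cell_name_from_path below expect a
# routing path such as 'a!b!c!d' to reverse to 'd!c!b!a'.  A plain-Python
# sketch of that reversal, with '!' as the separator used throughout these
# tests; reverse_cell_path is a hypothetical helper name.


def reverse_cell_path(path, sep='!'):
    # 'a!b!c!d' -> 'd!c!b!a', matching the expectation in test_reverse_path
    return sep.join(reversed(path.split(sep)))
# ---------------------------------------------------------------------------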
""" Tests For Cells Messaging module """ import uuid import mock from mox3 import mox import oslo_messaging from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils from nova.cells import messaging from nova.cells import rpcapi as cells_rpcapi from nova.cells import utils as cells_utils from nova.compute import task_states from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova import objects from nova.objects import base as objects_base from nova.objects import fields as objects_fields from nova import rpc from nova import test from nova.tests.unit.cells import fakes from nova.tests.unit import fake_instance from nova.tests.unit import fake_server_actions CONF = nova.conf.CONF class CellsMessageClassesTestCase(test.NoDBTestCase): """Test case for the main Cells Message classes.""" def setUp(self): super(CellsMessageClassesTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self.our_name = 'api-cell' self.msg_runner = fakes.get_message_runner(self.our_name) self.state_manager = self.msg_runner.state_manager def test_reverse_path(self): path = 'a!b!c!d' expected = 'd!c!b!a' rev_path = messaging._reverse_path(path) self.assertEqual(expected, rev_path) def test_response_cell_name_from_path(self): # test array with tuples of inputs/expected outputs test_paths = [('cell1', 'cell1'), ('cell1!cell2', 'cell2!cell1'), ('cell1!cell2!cell3', 'cell3!cell2!cell1')] for test_input, expected_output in test_paths: self.assertEqual(expected_output, messaging._response_cell_name_from_path(test_input)) def test_response_cell_name_from_path_neighbor_only(self): # test array with tuples of inputs/expected outputs test_paths = [('cell1', 'cell1'), ('cell1!cell2', 'cell2!cell1'), ('cell1!cell2!cell3', 'cell3!cell2')] for test_input, expected_output in test_paths: self.assertEqual(expected_output, messaging._response_cell_name_from_path(test_input, neighbor_only=True)) def test_response_to_json_and_from_json(self): fake_uuid = str(uuid.uuid4()) response = messaging.Response(self.ctxt, 'child-cell!api-cell', objects.Instance(id=1, uuid=fake_uuid), False) json_response = response.to_json() deserialized_response = messaging.Response.from_json(self.ctxt, json_response) obj = deserialized_response.value self.assertIsInstance(obj, objects.Instance) self.assertEqual(1, obj.id) self.assertEqual(fake_uuid, obj.uuid) def test_targeted_message(self): self.flags(max_hop_count=99, group='cells') target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) self.assertEqual(self.ctxt, tgt_message.ctxt) self.assertEqual(method, tgt_message.method_name) self.assertEqual(method_kwargs, tgt_message.method_kwargs) self.assertEqual(direction, tgt_message.direction) self.assertEqual(target_cell, target_cell) self.assertFalse(tgt_message.fanout) self.assertFalse(tgt_message.need_response) self.assertEqual(self.our_name, tgt_message.routing_path) self.assertEqual(1, tgt_message.hop_count) self.assertEqual(99, tgt_message.max_hop_count) self.assertFalse(tgt_message.is_broadcast) # Correct next hop? 
next_hop = tgt_message._get_next_hop() child_cell = self.state_manager.get_child_cell('child-cell2') self.assertEqual(child_cell, next_hop) def test_create_targeted_message_with_response(self): self.flags(max_hop_count=99, group='cells') our_name = 'child-cell1' target_cell = 'child-cell1!api-cell' msg_runner = fakes.get_message_runner(our_name) method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'up' tgt_message = messaging._TargetedMessage(msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) self.assertEqual(self.ctxt, tgt_message.ctxt) self.assertEqual(method, tgt_message.method_name) self.assertEqual(method_kwargs, tgt_message.method_kwargs) self.assertEqual(direction, tgt_message.direction) self.assertEqual(target_cell, target_cell) self.assertFalse(tgt_message.fanout) self.assertTrue(tgt_message.need_response) self.assertEqual(our_name, tgt_message.routing_path) self.assertEqual(1, tgt_message.hop_count) self.assertEqual(99, tgt_message.max_hop_count) self.assertFalse(tgt_message.is_broadcast) # Correct next hop? next_hop = tgt_message._get_next_hop() parent_cell = msg_runner.state_manager.get_parent_cell('api-cell') self.assertEqual(parent_cell, next_hop) def test_targeted_message_when_target_is_cell_state(self): method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' target_cell = self.state_manager.get_child_cell('child-cell2') tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) self.assertEqual('api-cell!child-cell2', tgt_message.target_cell) # Correct next hop? next_hop = tgt_message._get_next_hop() self.assertEqual(target_cell, next_hop) def test_targeted_message_when_target_cell_state_is_me(self): method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' target_cell = self.state_manager.get_my_state() tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) self.assertEqual('api-cell', tgt_message.target_cell) # Correct next hop? next_hop = tgt_message._get_next_hop() self.assertEqual(target_cell, next_hop) def test_create_broadcast_message(self): self.flags(max_hop_count=99, group='cells') self.flags(name='api-cell', max_hop_count=99, group='cells') method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction) self.assertEqual(self.ctxt, bcast_message.ctxt) self.assertEqual(method, bcast_message.method_name) self.assertEqual(method_kwargs, bcast_message.method_kwargs) self.assertEqual(direction, bcast_message.direction) self.assertFalse(bcast_message.fanout) self.assertFalse(bcast_message.need_response) self.assertEqual(self.our_name, bcast_message.routing_path) self.assertEqual(1, bcast_message.hop_count) self.assertEqual(99, bcast_message.max_hop_count) self.assertTrue(bcast_message.is_broadcast) # Correct next hops? 
next_hops = bcast_message._get_next_hops() child_cells = self.state_manager.get_child_cells() self.assertEqual(child_cells, next_hops) def test_create_broadcast_message_with_response(self): self.flags(max_hop_count=99, group='cells') our_name = 'child-cell1' msg_runner = fakes.get_message_runner(our_name) method = 'fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'up' bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt, method, method_kwargs, direction, need_response=True) self.assertEqual(self.ctxt, bcast_message.ctxt) self.assertEqual(method, bcast_message.method_name) self.assertEqual(method_kwargs, bcast_message.method_kwargs) self.assertEqual(direction, bcast_message.direction) self.assertFalse(bcast_message.fanout) self.assertTrue(bcast_message.need_response) self.assertEqual(our_name, bcast_message.routing_path) self.assertEqual(1, bcast_message.hop_count) self.assertEqual(99, bcast_message.max_hop_count) self.assertTrue(bcast_message.is_broadcast) # Correct next hops? next_hops = bcast_message._get_next_hops() parent_cells = msg_runner.state_manager.get_parent_cells() self.assertEqual(parent_cells, next_hops) def test_self_targeted_message(self): target_cell = 'api-cell' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) def test_child_targeted_message(self): target_cell = 'api-cell!child-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) def test_child_targeted_message_with_object(self): target_cell = 'api-cell!child-cell1' method = 'our_fake_method' direction = 'down' call_info = {} class CellsMsgingTestObject(objects_base.NovaObject): """Test object. We just need 1 field in order to test that this gets serialized properly. 
""" fields = {'test': objects_fields.StringField()} objects_base.NovaObjectRegistry.register(CellsMsgingTestObject) test_obj = CellsMsgingTestObject() test_obj.test = 'meow' method_kwargs = dict(obj=test_obj, arg1=1, arg2=2) def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(target_cell, call_info['routing_path']) self.assertEqual(3, len(call_info['kwargs'])) self.assertEqual(1, call_info['kwargs']['arg1']) self.assertEqual(2, call_info['kwargs']['arg2']) # Verify we get a new object with what we expect. obj = call_info['kwargs']['obj'] self.assertIsInstance(obj, CellsMsgingTestObject) self.assertNotEqual(id(test_obj), id(obj)) self.assertEqual(test_obj.test, obj.test) def test_grandchild_targeted_message(self): target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell) tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) def test_grandchild_targeted_message_with_response(self): target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' call_info = {} def our_fake_method(message, **kwargs): call_info['context'] = message.ctxt call_info['routing_path'] = message.routing_path call_info['kwargs'] = kwargs return 'our_fake_response' fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertEqual(self.ctxt, call_info['context']) self.assertEqual(method_kwargs, call_info['kwargs']) self.assertEqual(target_cell, call_info['routing_path']) self.assertFalse(response.failure) self.assertEqual('our_fake_response', response.value_or_raise()) def test_grandchild_targeted_message_with_error(self): target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): raise test.TestingException('this should be returned') fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) self.assertRaises(test.TestingException, response.value_or_raise) def test_grandchild_targeted_message_max_hops(self): self.flags(max_hop_count=2, group='cells') target_cell = 'api-cell!child-cell2!grandchild-cell1' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) 
direction = 'down' def our_fake_method(message, **kwargs): raise test.TestingException('should not be reached') fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method', our_fake_method) tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) self.assertRaises(exception.CellMaxHopCountReached, response.value_or_raise) def test_targeted_message_invalid_cell(self): target_cell = 'api-cell!child-cell2!grandchild-cell4' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) self.assertRaises(exception.CellRoutingInconsistency, response.value_or_raise) def test_targeted_message_invalid_cell2(self): target_cell = 'unknown-cell!child-cell2' method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' tgt_message = messaging._TargetedMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=True) response = tgt_message.process() self.assertTrue(response.failure) self.assertRaises(exception.CellRoutingInconsistency, response.value_or_raise) def test_targeted_message_target_cell_none(self): target_cell = None method = 'our_fake_method' method_kwargs = dict(arg=1, arg2=2) direction = 'down' self.assertRaises(exception.CellRoutingInconsistency, messaging._TargetedMessage, self.msg_runner, self.ctxt, method, method_kwargs, direction, target_cell, need_response=False) def test_broadcast_routing(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' cells = set() def our_fake_method(message, **kwargs): cells.add(message.routing_path) fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True) bcast_message.process() # fakes creates 8 cells (including ourself). self.assertEqual(8, len(cells)) def test_broadcast_routing_up(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'up' msg_runner = fakes.get_message_runner('grandchild-cell3') cells = set() def our_fake_method(message, **kwargs): cells.add(message.routing_path) fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True) bcast_message.process() # Paths are reversed, since going 'up' expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3', 'grandchild-cell3!child-cell3!api-cell']) self.assertEqual(expected, cells) def test_broadcast_routing_without_ourselves(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' cells = set() def our_fake_method(message, **kwargs): cells.add(message.routing_path) fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=False) bcast_message.process() # fakes creates 8 cells (including ourself). So we should see # only 7 here. 
self.assertEqual(7, len(cells)) def test_broadcast_routing_with_response(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): return 'response-%s' % message.routing_path fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() self.assertEqual(8, len(responses)) for response in responses: self.assertFalse(response.failure) self.assertEqual('response-%s' % response.cell_name, response.value_or_raise()) def test_broadcast_routing_with_response_max_hops(self): self.flags(max_hop_count=2, group='cells') method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): return 'response-%s' % message.routing_path fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() # Should only get responses from our immediate children (and # ourselves) self.assertEqual(5, len(responses)) for response in responses: self.assertFalse(response.failure) self.assertEqual('response-%s' % response.cell_name, response.value_or_raise()) def test_broadcast_routing_with_all_erroring(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method(message, **kwargs): raise test.TestingException('fake failure') fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() self.assertEqual(8, len(responses)) for response in responses: self.assertTrue(response.failure) self.assertRaises(test.TestingException, response.value_or_raise) def test_broadcast_routing_with_two_erroring(self): method = 'our_fake_method' method_kwargs = dict(arg1=1, arg2=2) direction = 'down' def our_fake_method_failing(message, **kwargs): raise test.TestingException('fake failure') def our_fake_method(message, **kwargs): return 'response-%s' % message.routing_path fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method) fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method', our_fake_method_failing) fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method', our_fake_method_failing) bcast_message = messaging._BroadcastMessage(self.msg_runner, self.ctxt, method, method_kwargs, direction, run_locally=True, need_response=True) responses = bcast_message.process() self.assertEqual(8, len(responses)) failure_responses = [resp for resp in responses if resp.failure] success_responses = [resp for resp in responses if not resp.failure] self.assertEqual(2, len(failure_responses)) self.assertEqual(6, len(success_responses)) for response in success_responses: self.assertFalse(response.failure) self.assertEqual('response-%s' % response.cell_name, response.value_or_raise()) for response in failure_responses: self.assertIn(response.cell_name, ['api-cell!child-cell2', 'api-cell!child-cell3!grandchild-cell3']) self.assertTrue(response.failure) self.assertRaises(test.TestingException, response.value_or_raise) class CellsTargetedMethodsWithDatabaseTestCase(test.TestCase): """These tests access the database 
unlike the others.""" def setUp(self): super(CellsTargetedMethodsWithDatabaseTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self._setup_attrs('api-cell', 'api-cell!child-cell2') def _setup_attrs(self, source_cell, target_cell): self.tgt_cell_name = target_cell self.src_msg_runner = fakes.get_message_runner(source_cell) def test_service_delete(self): fake_service = dict(id=42, host='fake_host', binary='nova-compute', topic='compute') ctxt = self.ctxt.elevated() db.service_create(ctxt, fake_service) self.src_msg_runner.service_delete( ctxt, self.tgt_cell_name, fake_service['id']) self.assertRaises(exception.ServiceNotFound, db.service_get, ctxt, fake_service['id']) class CellsTargetedMethodsTestCase(test.NoDBTestCase): """Test case for _TargetedMessageMethods class. Most of these tests actually test the full path from the MessageRunner through to the functionality of the message method. Hits 2 birds with 1 stone, even though it's a little more than a unit test. """ def setUp(self): super(CellsTargetedMethodsTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self._setup_attrs('api-cell', 'api-cell!child-cell2') def _setup_attrs(self, source_cell, target_cell): self.tgt_cell_name = target_cell self.src_msg_runner = fakes.get_message_runner(source_cell) self.src_state_manager = self.src_msg_runner.state_manager tgt_shortname = target_cell.split('!')[-1] self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname) self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner self.tgt_scheduler = self.tgt_msg_runner.scheduler self.tgt_state_manager = self.tgt_msg_runner.state_manager methods_cls = self.tgt_msg_runner.methods_by_type['targeted'] self.tgt_methods_cls = methods_cls self.tgt_compute_api = methods_cls.compute_api self.tgt_host_api = methods_cls.host_api self.tgt_db_inst = methods_cls.db self.tgt_c_rpcapi = methods_cls.compute_rpcapi def test_build_instances(self): build_inst_kwargs = {'filter_properties': {}, 'key1': 'value1', 'key2': 'value2'} self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances') self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs) self.mox.ReplayAll() self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name, build_inst_kwargs) def _run_compute_api_method(self, method_name): instance = fake_instance.fake_instance_obj(self.ctxt) method_info = {'method': method_name, 'method_args': (instance.uuid, 2, 3), 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}} expected_attrs = ['metadata', 'system_metadata', 'security_groups', 'info_cache'] @mock.patch.object(self.tgt_compute_api, method_name, return_value='fake-result') @mock.patch.object(objects.Instance, 'get_by_uuid', return_value=instance) def run_method(mock_get_by_uuid, mock_method): response = self.src_msg_runner.run_compute_api_method( self.ctxt, self.tgt_cell_name, method_info, True) result = response.value_or_raise() self.assertEqual('fake-result', result) mock_get_by_uuid.assert_called_once_with(self.ctxt, instance.uuid, expected_attrs=expected_attrs) mock_method.assert_called_once_with(self.ctxt, instance, 2, 3, arg1='val1', arg2='val2') run_method() def test_run_compute_api_method_expects_obj(self): # Run compute_api start method self._run_compute_api_method('start') def test_run_compute_api_method_shelve_with_info_cache(self): # Run compute_api shelve method as it requires info_cache and # metadata to be present in instance object self._run_compute_api_method('shelve') def 
test_run_compute_api_method_unknown_instance(self): # Unknown instance should send a broadcast up that instance # is gone. instance = fake_instance.fake_instance_obj(self.ctxt) instance_uuid = instance.uuid method_info = {'method': 'reboot', 'method_args': (instance_uuid, 2, 3), 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}} self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid') self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_destroy_at_top') objects.Instance.get_by_uuid(self.ctxt, instance.uuid, expected_attrs=['metadata', 'system_metadata', 'security_groups', 'info_cache']).AndRaise( exception.InstanceNotFound(instance_id=instance_uuid)) self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, mox.IsA(objects.Instance)) self.mox.ReplayAll() response = self.src_msg_runner.run_compute_api_method( self.ctxt, self.tgt_cell_name, method_info, True) self.assertRaises(exception.InstanceNotFound, response.value_or_raise) def test_update_capabilities(self): # Route up to API self._setup_attrs('child-cell2', 'child-cell2!api-cell') capabs = {'cap1': set(['val1', 'val2']), 'cap2': set(['val3'])} # The list(set([])) seems silly, but we can't assume the order # of the list... This behavior should match the code we're # testing... which is check that a set was converted to a list. expected_capabs = {'cap1': list(set(['val1', 'val2'])), 'cap2': ['val3']} self.mox.StubOutWithMock(self.src_state_manager, 'get_our_capabilities') self.mox.StubOutWithMock(self.tgt_state_manager, 'update_cell_capabilities') self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capabilities') self.src_state_manager.get_our_capabilities().AndReturn(capabs) self.tgt_state_manager.update_cell_capabilities('child-cell2', expected_capabs) self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.tell_parents_our_capabilities(self.ctxt) def test_update_capacities(self): self._setup_attrs('child-cell2', 'child-cell2!api-cell') capacs = 'fake_capacs' self.mox.StubOutWithMock(self.src_state_manager, 'get_our_capacities') self.mox.StubOutWithMock(self.tgt_state_manager, 'update_cell_capacities') self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capacities') self.src_state_manager.get_our_capacities().AndReturn(capacs) self.tgt_state_manager.update_cell_capacities('child-cell2', capacs) self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.tell_parents_our_capacities(self.ctxt) def test_announce_capabilities(self): self._setup_attrs('api-cell', 'api-cell!child-cell1') # To make this easier to test, make us only have 1 child cell. cell_state = self.src_state_manager.child_cells['child-cell1'] self.src_state_manager.child_cells = {'child-cell1': cell_state} self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capabilities') self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.ask_children_for_capabilities(self.ctxt) def test_announce_capacities(self): self._setup_attrs('api-cell', 'api-cell!child-cell1') # To make this easier to test, make us only have 1 child cell. 
cell_state = self.src_state_manager.child_cells['child-cell1'] self.src_state_manager.child_cells = {'child-cell1': cell_state} self.mox.StubOutWithMock(self.tgt_msg_runner, 'tell_parents_our_capacities') self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt) self.mox.ReplayAll() self.src_msg_runner.ask_children_for_capacities(self.ctxt) def test_service_get_by_compute_host(self): fake_host_name = 'fake-host-name' self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host(self.ctxt, fake_host_name).AndReturn('fake-service') self.mox.ReplayAll() response = self.src_msg_runner.service_get_by_compute_host( self.ctxt, self.tgt_cell_name, fake_host_name) result = response.value_or_raise() self.assertEqual('fake-service', result) def test_service_update(self): binary = 'nova-compute' params_to_update = {'disabled': True, 'report_count': 13} fake_service = objects.Service(id=42, host='fake_host', binary='nova-compute', topic='compute') fake_service.compute_node = objects.ComputeNode(id=1, host='fake_host') self.mox.StubOutWithMock(objects.Service, 'get_by_args') self.mox.StubOutWithMock(objects.Service, 'save') objects.Service.get_by_args( self.ctxt, 'fake_host', 'nova-compute').AndReturn(fake_service) fake_service.save() self.mox.ReplayAll() response = self.src_msg_runner.service_update( self.ctxt, self.tgt_cell_name, 'fake_host', binary, params_to_update) result = response.value_or_raise() self.assertIsInstance(result, objects.Service) # NOTE(sbauza): As NovaObjects can't be compared directly, we need to # check the fields by converting them to primitives first self.assertEqual(jsonutils.to_primitive(fake_service), jsonutils.to_primitive(result)) def test_proxy_rpc_to_manager_call(self): fake_topic = 'fake-topic' fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}} fake_host_name = 'fake-host-name' self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host(self.ctxt, fake_host_name) target = oslo_messaging.Target(topic='fake-topic') rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpc, 'get_client') rpc.get_client(target).AndReturn(rpcclient) rpcclient.prepare(timeout=5).AndReturn(rpcclient) rpcclient.call(mox.IgnoreArg(), 'fake_rpc_method').AndReturn('fake_result') self.mox.ReplayAll() response = self.src_msg_runner.proxy_rpc_to_manager( self.ctxt, self.tgt_cell_name, fake_host_name, fake_topic, fake_rpc_message, True, timeout=5) result = response.value_or_raise() self.assertEqual('fake_result', result) def test_proxy_rpc_to_manager_cast(self): fake_topic = 'fake-topic' fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}} fake_host_name = 'fake-host-name' self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host(self.ctxt, fake_host_name) target = oslo_messaging.Target(topic='fake-topic') rpcclient = self.mox.CreateMockAnything() self.mox.StubOutWithMock(rpc, 'get_client') rpc.get_client(target).AndReturn(rpcclient) rpcclient.cast(mox.IgnoreArg(), 'fake_rpc_method') self.mox.ReplayAll() self.src_msg_runner.proxy_rpc_to_manager( self.ctxt, self.tgt_cell_name, fake_host_name, fake_topic, fake_rpc_message, False, timeout=None) def test_task_log_get_all_targeted(self): task_name = 'fake_task_name' begin = 'fake_begin' end = 'fake_end' host = 'fake_host' state = 'fake_state' self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all') self.tgt_db_inst.task_log_get_all(self.ctxt, task_name, begin, end, host=host,
state=state).AndReturn(['fake_result']) self.mox.ReplayAll() response = self.src_msg_runner.task_log_get_all(self.ctxt, self.tgt_cell_name, task_name, begin, end, host=host, state=state) self.assertIsInstance(response, list) self.assertEqual(1, len(response)) result = response[0].value_or_raise() self.assertEqual(['fake_result'], result) def test_compute_node_get(self): compute_id = 'fake-id' self.mox.StubOutWithMock(objects.ComputeNode, 'get_by_id') objects.ComputeNode.get_by_id(self.ctxt, compute_id).AndReturn('fake_result') self.mox.ReplayAll() response = self.src_msg_runner.compute_node_get(self.ctxt, self.tgt_cell_name, compute_id) result = response.value_or_raise() self.assertEqual('fake_result', result) def test_actions_get(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] self.mox.StubOutWithMock(self.tgt_db_inst, 'actions_get') self.tgt_db_inst.actions_get(self.ctxt, 'fake-uuid').AndReturn([fake_act]) self.mox.ReplayAll() response = self.src_msg_runner.actions_get(self.ctxt, self.tgt_cell_name, 'fake-uuid') result = response.value_or_raise() self.assertEqual([jsonutils.to_primitive(fake_act)], result) def test_action_get_by_request_id(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] self.mox.StubOutWithMock(self.tgt_db_inst, 'action_get_by_request_id') self.tgt_db_inst.action_get_by_request_id(self.ctxt, 'fake-uuid', 'req-fake').AndReturn(fake_act) self.mox.ReplayAll() response = self.src_msg_runner.action_get_by_request_id(self.ctxt, self.tgt_cell_name, 'fake-uuid', 'req-fake') result = response.value_or_raise() self.assertEqual(jsonutils.to_primitive(fake_act), result) def test_action_events_get(self): fake_action_id = fake_server_actions.FAKE_ACTION_ID1 fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id] self.mox.StubOutWithMock(self.tgt_db_inst, 'action_events_get') self.tgt_db_inst.action_events_get(self.ctxt, 'fake-action').AndReturn(fake_events) self.mox.ReplayAll() response = self.src_msg_runner.action_events_get(self.ctxt, self.tgt_cell_name, 'fake-action') result = response.value_or_raise() self.assertEqual(jsonutils.to_primitive(fake_events), result) def test_validate_console_port(self): instance_uuid = 'fake_instance_uuid' instance = objects.Instance(uuid=instance_uuid) console_port = 'fake-port' console_type = 'fake-type' @mock.patch.object(objects.Instance, 'get_by_uuid', return_value=instance) @mock.patch.object(self.tgt_c_rpcapi, 'validate_console_port', return_value='fake_result') def do_test(mock_validate, mock_get): response = self.src_msg_runner.validate_console_port(self.ctxt, self.tgt_cell_name, instance_uuid, console_port, console_type) result = response.value_or_raise() self.assertEqual('fake_result', result) mock_get.assert_called_once_with(self.ctxt, instance_uuid) mock_validate.assert_called_once_with(self.ctxt, instance, console_port, console_type) do_test() def test_get_migrations_for_a_given_cell(self): filters = {'cell_name': 'child-cell2', 'status': 'confirmed'} migrations_in_progress = [{'id': 123}] self.mox.StubOutWithMock(self.tgt_compute_api, 'get_migrations') self.tgt_compute_api.get_migrations(self.ctxt, filters).\ AndReturn(migrations_in_progress) self.mox.ReplayAll() responses = self.src_msg_runner.get_migrations( self.ctxt, self.tgt_cell_name, False, filters) result = responses[0].value_or_raise() 
self.assertEqual(migrations_in_progress, result) def test_get_migrations_for_an_invalid_cell(self): filters = {'cell_name': 'invalid_Cell', 'status': 'confirmed'} responses = self.src_msg_runner.get_migrations( self.ctxt, 'api_cell!invalid_cell', False, filters) self.assertEqual(0, len(responses)) def test_call_compute_api_with_obj(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() self.mox.StubOutWithMock(instance, 'refresh') # Using 'snapshot' for this test, because it # takes args and kwargs. self.mox.StubOutWithMock(self.tgt_compute_api, 'snapshot') instance.refresh() self.tgt_compute_api.snapshot( self.ctxt, instance, 'name', extra_properties='props').AndReturn('foo') self.mox.ReplayAll() result = self.tgt_methods_cls._call_compute_api_with_obj( self.ctxt, instance, 'snapshot', 'name', extra_properties='props') self.assertEqual('foo', result) def test_call_compute_api_with_obj_no_cache(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() error = exception.InstanceInfoCacheNotFound( instance_uuid=instance.uuid) with mock.patch.object(instance, 'refresh', side_effect=error): self.assertRaises(exception.InstanceInfoCacheNotFound, self.tgt_methods_cls._call_compute_api_with_obj, self.ctxt, instance, 'snapshot') def test_call_delete_compute_api_with_obj_no_cache(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() error = exception.InstanceInfoCacheNotFound( instance_uuid=instance.uuid) with test.nested( mock.patch.object(instance, 'refresh', side_effect=error), mock.patch.object(self.tgt_compute_api, 'delete')) as (inst, delete): self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt, instance, 'delete') delete.assert_called_once_with(self.ctxt, instance) def test_call_compute_with_obj_unknown_instance(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() instance.vm_state = vm_states.ACTIVE instance.task_state = None self.mox.StubOutWithMock(instance, 'refresh') self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_destroy_at_top') instance.refresh().AndRaise( exception.InstanceNotFound(instance_id=instance.uuid)) self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, mox.IsA(objects.Instance)) self.mox.ReplayAll() self.assertRaises(exception.InstanceNotFound, self.tgt_methods_cls._call_compute_api_with_obj, self.ctxt, instance, 'snapshot', 'name') def _instance_update_helper(self, admin_state_reset): class FakeMessage(object): pass message = FakeMessage() message.ctxt = self.ctxt instance = objects.Instance() instance.cell_name = self.tgt_cell_name instance.obj_reset_changes() instance.task_state = 'meow' instance.vm_state = 'wuff' instance.user_data = 'foo' instance.metadata = {'meta': 'data'} instance.system_metadata = {'system': 'metadata'} self.assertEqual(set(['user_data', 'vm_state', 'task_state', 'metadata', 'system_metadata']), instance.obj_what_changed()) self.mox.StubOutWithMock(instance, 'save') def _check_object(*args, **kwargs): # task_state and vm_state changes should have been cleared # before calling save() if admin_state_reset: self.assertEqual( set(['user_data', 'vm_state', 'task_state']), instance.obj_what_changed()) else: self.assertEqual(set(['user_data']), instance.obj_what_changed()) instance.save(expected_task_state='exp_task', expected_vm_state='exp_vm').WithSideEffects( _check_object) self.mox.ReplayAll() self.tgt_methods_cls.instance_update_from_api( message, instance, expected_vm_state='exp_vm', expected_task_state='exp_task', 
admin_state_reset=admin_state_reset) def test_instance_update_from_api(self): self._instance_update_helper(False) def test_instance_update_from_api_admin_state_reset(self): self._instance_update_helper(True) def test_instance_update_from_api_calls_skip_cells_sync(self): self.flags(enable=True, cell_type='compute', group='cells') instance = fake_instance.fake_instance_obj(self.ctxt) instance.cell_name = self.tgt_cell_name instance.task_state = 'meow' instance.vm_state = 'wuff' instance.user_data = 'foo' message = '' @mock.patch.object(instance, 'save', side_effect=test.TestingException) @mock.patch.object(instance, 'skip_cells_sync') def _ensure_skip_cells_sync_called(mock_sync, mock_save): self.assertRaises(test.TestingException, self.tgt_methods_cls.instance_update_from_api, message, instance, expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset=False) mock_sync.assert_has_calls([mock.call()]) _ensure_skip_cells_sync_called() self.assertEqual(self.tgt_cell_name, instance.cell_name) @mock.patch.object(db, 'instance_update_and_get_original') def test_instance_update_from_api_skips_cell_sync(self, mock_db_update): self.flags(enable=True, cell_type='compute', group='cells') instance = fake_instance.fake_instance_obj(self.ctxt) instance.cell_name = self.tgt_cell_name instance.task_state = 'meow' instance.vm_state = 'wuff' instance.user_data = 'foo' message = '' inst_ref = dict(objects_base.obj_to_primitive(instance)) mock_db_update.return_value = (inst_ref, inst_ref) with mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_at_top') as inst_upd_at_top: self.tgt_methods_cls.instance_update_from_api(message, instance, expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset=False) self.assertEqual(0, inst_upd_at_top.call_count) def _test_instance_action_method(self, method, args, kwargs, expected_args, expected_kwargs, expect_result): class FakeMessage(object): pass message = FakeMessage() message.ctxt = self.ctxt message.need_response = expect_result meth_cls = self.tgt_methods_cls self.mox.StubOutWithMock(meth_cls, '_call_compute_api_with_obj') method_corrections = { 'terminate': 'delete', } api_method = method_corrections.get(method, method) meth_cls._call_compute_api_with_obj( self.ctxt, 'fake-instance', api_method, *expected_args, **expected_kwargs).AndReturn('meow') self.mox.ReplayAll() method_translations = {'revert_resize': 'revert_resize', 'confirm_resize': 'confirm_resize', 'reset_network': 'reset_network', 'inject_network_info': 'inject_network_info', 'set_admin_password': 'set_admin_password', } tgt_method = method_translations.get(method, '%s_instance' % method) result = getattr(meth_cls, tgt_method)( message, 'fake-instance', *args, **kwargs) if expect_result: self.assertEqual('meow', result) def test_start_instance(self): self._test_instance_action_method('start', (), {}, (), {}, False) def test_stop_instance_cast(self): self._test_instance_action_method('stop', (), {}, (), {'do_cast': True, 'clean_shutdown': True}, False) def test_stop_instance_call(self): self._test_instance_action_method('stop', (), {}, (), {'do_cast': False, 'clean_shutdown': True}, True) def test_reboot_instance(self): kwargs = dict(reboot_type='HARD') self._test_instance_action_method('reboot', (), kwargs, (), kwargs, False) def test_suspend_instance(self): self._test_instance_action_method('suspend', (), {}, (), {}, False) def test_resume_instance(self): self._test_instance_action_method('resume', (), {}, (), {}, False) def test_get_host_uptime(self): 
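# The uptime string stubbed on the target cell's host API should be returned unchanged through the cells messaging response.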
host_name = "fake-host" host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:" " 0.20, 0.12, 0.14") self.mox.StubOutWithMock(self.tgt_host_api, 'get_host_uptime') self.tgt_host_api.get_host_uptime(self.ctxt, host_name).\ AndReturn(host_uptime) self.mox.ReplayAll() response = self.src_msg_runner.get_host_uptime(self.ctxt, self.tgt_cell_name, host_name) expected_host_uptime = response.value_or_raise() self.assertEqual(host_uptime, expected_host_uptime) def test_terminate_instance(self): self._test_instance_action_method('terminate', (), {}, (), {}, False) def test_soft_delete_instance(self): self._test_instance_action_method('soft_delete', (), {}, (), {}, False) def test_pause_instance(self): self._test_instance_action_method('pause', (), {}, (), {}, False) def test_unpause_instance(self): self._test_instance_action_method('unpause', (), {}, (), {}, False) def _test_resize_instance(self, clean_shutdown=True): kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'), extra_instance_updates=dict(cow='moo'), clean_shutdown=clean_shutdown) expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo', clean_shutdown=clean_shutdown) self._test_instance_action_method('resize', (), kwargs, (), expected_kwargs, False) def test_resize_instance(self): self._test_resize_instance() def test_resize_instance_forced_shutdown(self): self._test_resize_instance(clean_shutdown=False) def test_live_migrate_instance(self): kwargs = dict(block_migration='fake-block-mig', disk_over_commit='fake-commit', host_name='fake-host') expected_args = ('fake-block-mig', 'fake-commit', 'fake-host') self._test_instance_action_method('live_migrate', (), kwargs, expected_args, {}, False) def test_revert_resize(self): self._test_instance_action_method('revert_resize', (), {}, (), {}, False) def test_confirm_resize(self): self._test_instance_action_method('confirm_resize', (), {}, (), {}, False) def test_reset_network(self): self._test_instance_action_method('reset_network', (), {}, (), {}, False) def test_inject_network_info(self): self._test_instance_action_method('inject_network_info', (), {}, (), {}, False) def test_snapshot_instance(self): inst = objects.Instance() meth_cls = self.tgt_methods_cls self.mox.StubOutWithMock(inst, 'refresh') self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'snapshot_instance') def check_state(expected_task_state=None): self.assertEqual(task_states.IMAGE_SNAPSHOT_PENDING, inst.task_state) inst.refresh() inst.save(expected_task_state=[None]).WithSideEffects(check_state) meth_cls.compute_rpcapi.snapshot_instance(self.ctxt, inst, 'image-id') self.mox.ReplayAll() class FakeMessage(object): pass message = FakeMessage() message.ctxt = self.ctxt message.need_response = False meth_cls.snapshot_instance(message, inst, image_id='image-id') def test_backup_instance(self): inst = objects.Instance() meth_cls = self.tgt_methods_cls self.mox.StubOutWithMock(inst, 'refresh') self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'backup_instance') def check_state(expected_task_state=None): self.assertEqual(task_states.IMAGE_BACKUP, inst.task_state) inst.refresh() inst.save(expected_task_state=[None]).WithSideEffects(check_state) meth_cls.compute_rpcapi.backup_instance(self.ctxt, inst, 'image-id', 'backup-type', 'rotation') self.mox.ReplayAll() class FakeMessage(object): pass message = FakeMessage() message.ctxt = self.ctxt message.need_response = False meth_cls.backup_instance(message, inst, 
image_id='image-id', backup_type='backup-type', rotation='rotation') def test_set_admin_password(self): args = ['fake-password'] self._test_instance_action_method('set_admin_password', args, {}, args, {}, False) class CellsBroadcastMethodsTestCase(test.NoDBTestCase): """Test case for _BroadcastMessageMethods class. Most of these tests actually test the full path from the MessageRunner through to the functionality of the message method. Hits 2 birds with 1 stone, even though it's a little more than a unit test. """ def setUp(self): super(CellsBroadcastMethodsTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self._setup_attrs() def _setup_attrs(self, up=True): mid_cell = 'child-cell2' if up: src_cell = 'grandchild-cell1' tgt_cell = 'api-cell' else: src_cell = 'api-cell' tgt_cell = 'grandchild-cell1' self.src_msg_runner = fakes.get_message_runner(src_cell) methods_cls = self.src_msg_runner.methods_by_type['broadcast'] self.src_methods_cls = methods_cls self.src_db_inst = methods_cls.db self.src_compute_api = methods_cls.compute_api self.src_ca_rpcapi = methods_cls.consoleauth_rpcapi if not up: # fudge things so we only have 1 child to broadcast to state_manager = self.src_msg_runner.state_manager for cell in state_manager.get_child_cells(): if cell.name != 'child-cell2': del state_manager.child_cells[cell.name] self.mid_msg_runner = fakes.get_message_runner(mid_cell) methods_cls = self.mid_msg_runner.methods_by_type['broadcast'] self.mid_methods_cls = methods_cls self.mid_db_inst = methods_cls.db self.mid_compute_api = methods_cls.compute_api self.mid_ca_rpcapi = methods_cls.consoleauth_rpcapi self.tgt_msg_runner = fakes.get_message_runner(tgt_cell) methods_cls = self.tgt_msg_runner.methods_by_type['broadcast'] self.tgt_methods_cls = methods_cls self.tgt_db_inst = methods_cls.db self.tgt_compute_api = methods_cls.compute_api self.tgt_ca_rpcapi = methods_cls.consoleauth_rpcapi def test_at_the_top(self): self.assertTrue(self.tgt_methods_cls._at_the_top()) self.assertFalse(self.mid_methods_cls._at_the_top()) self.assertFalse(self.src_methods_cls._at_the_top()) def test_apply_expected_states_building(self): instance_info = objects.Instance(vm_state=vm_states.BUILDING) expected = instance_info.obj_clone() expected.expected_vm_state = [vm_states.BUILDING, None] expected_vm_state = self.src_methods_cls._get_expected_vm_state( instance_info) self.assertEqual(expected.expected_vm_state, expected_vm_state) def test_apply_expected_states_resize_finish(self): instance_info = objects.Instance(task_state=task_states.RESIZE_FINISH) exp_states = [task_states.RESIZE_FINISH, task_states.RESIZE_MIGRATED, task_states.RESIZE_MIGRATING, task_states.RESIZE_PREP] expected = instance_info.obj_clone() expected.expected_task_state = exp_states expected_task_state = self.src_methods_cls._get_expected_task_state( instance_info) self.assertEqual(expected.expected_task_state, expected_task_state) def _test_instance_update_at_top(self, exists=True): fake_uuid = fake_server_actions.FAKE_UUID fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid') fake_sys_metadata = {'key1': 'value1', 'key2': 'value2'} fake_attrs = {'uuid': fake_uuid, 'cell_name': 'fake', 'info_cache': fake_info_cache, 'system_metadata': fake_sys_metadata} fake_instance = objects.Instance(**fake_attrs) expected_cell_name = 'api-cell!child-cell2!grandchild-cell1' def fake_save(instance): self.assertEqual(fake_uuid, instance.uuid) self.assertEqual(expected_cell_name, instance.cell_name) 
self.assertEqual(fake_info_cache, instance.info_cache) self.assertEqual(fake_sys_metadata, instance.system_metadata) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'create') def do_test(mock_create, mock_save): if exists: mock_save.side_effect = fake_save else: error = exception.InstanceNotFound(instance_id='fake_uuid') mock_save.side_effect = error self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance) if exists: mock_save.assert_called_once_with(expected_vm_state=None, expected_task_state=None) self.assertFalse(mock_create.called) else: mock_save.assert_called_once_with(expected_vm_state=None, expected_task_state=None) mock_create.assert_called_once_with() do_test() def test_instance_update_at_top(self): self._test_instance_update_at_top() def test_instance_update_at_top_does_not_already_exist(self): self._test_instance_update_at_top(exists=False) def test_instance_update_at_top_with_building_state(self): fake_uuid = fake_server_actions.FAKE_UUID fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid') fake_sys_metadata = {'key1': 'value1', 'key2': 'value2'} fake_attrs = {'uuid': fake_uuid, 'cell_name': 'fake', 'info_cache': fake_info_cache, 'system_metadata': fake_sys_metadata, 'vm_state': vm_states.BUILDING} fake_instance = objects.Instance(**fake_attrs) expected_cell_name = 'api-cell!child-cell2!grandchild-cell1' expected_vm_state = [vm_states.BUILDING, None] def fake_save(instance): self.assertEqual(fake_uuid, instance.uuid) self.assertEqual(expected_cell_name, instance.cell_name) self.assertEqual(fake_info_cache, instance.info_cache) self.assertEqual(fake_sys_metadata, instance.system_metadata) with mock.patch.object(objects.Instance, 'save', side_effect=fake_save) as mock_save: self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance) # Check that save is called with the right expected states. mock_save.assert_called_once_with( expected_vm_state=expected_vm_state, expected_task_state=None) def test_instance_destroy_at_top(self): fake_instance = objects.Instance(uuid='fake_uuid') with mock.patch.object(objects.Instance, 'destroy') as mock_destroy: self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance) mock_destroy.assert_called_once_with() def test_instance_destroy_at_top_incomplete_instance_obj(self): fake_instance = objects.Instance(uuid='fake_uuid') with mock.patch.object(objects.Instance, 'get_by_uuid') as mock_get: self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance) mock_get.assert_called_once_with(self.ctxt, fake_instance.uuid) def test_instance_hard_delete_everywhere(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) instance = {'uuid': 'meow'} # Should not be called in src (API cell) self.mox.StubOutWithMock(self.src_compute_api, 'delete') self.mox.StubOutWithMock(self.mid_compute_api, 'delete') self.mox.StubOutWithMock(self.tgt_compute_api, 'delete') self.mid_compute_api.delete(self.ctxt, instance) self.tgt_compute_api.delete(self.ctxt, instance) self.mox.ReplayAll() self.src_msg_runner.instance_delete_everywhere(self.ctxt, instance, 'hard') def test_instance_soft_delete_everywhere(self): # Reset this, as this is a broadcast down. 
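# up=False re-points the fixtures so the api-cell is the source and the broadcast travels down toward grandchild-cell1.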
self._setup_attrs(up=False) instance = {'uuid': 'meow'} # Should not be called in src (API cell) self.mox.StubOutWithMock(self.src_compute_api, 'soft_delete') self.mox.StubOutWithMock(self.mid_compute_api, 'soft_delete') self.mox.StubOutWithMock(self.tgt_compute_api, 'soft_delete') self.mid_compute_api.soft_delete(self.ctxt, instance) self.tgt_compute_api.soft_delete(self.ctxt, instance) self.mox.ReplayAll() self.src_msg_runner.instance_delete_everywhere(self.ctxt, instance, 'soft') def test_instance_fault_create_at_top(self): fake_instance_fault = {'id': 1, 'message': 'fake-message', 'details': 'fake-details'} if_mock = mock.Mock(spec_set=objects.InstanceFault) def _check_create(): self.assertEqual('fake-message', if_mock.message) self.assertEqual('fake-details', if_mock.details) # Should not be set self.assertNotEqual(1, if_mock.id) if_mock.create.side_effect = _check_create with mock.patch.object(objects, 'InstanceFault') as if_obj_mock: if_obj_mock.return_value = if_mock self.src_msg_runner.instance_fault_create_at_top( self.ctxt, fake_instance_fault) if_obj_mock.assert_called_once_with(context=self.ctxt) if_mock.create.assert_called_once_with() def test_bw_usage_update_at_top(self): fake_bw_update_info = {'uuid': 'fake_uuid', 'mac': 'fake_mac', 'start_period': 'fake_start_period', 'bw_in': 'fake_bw_in', 'bw_out': 'fake_bw_out', 'last_ctr_in': 'fake_last_ctr_in', 'last_ctr_out': 'fake_last_ctr_out', 'last_refreshed': 'fake_last_refreshed'} # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update') self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update') self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update') self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info) self.mox.ReplayAll() self.src_msg_runner.bw_usage_update_at_top(self.ctxt, fake_bw_update_info) def test_sync_instances(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) project_id = 'fake_project_id' updated_since_raw = 'fake_updated_since_raw' updated_since_parsed = 'fake_updated_since_parsed' deleted = 'fake_deleted' instance1 = objects.Instance(uuid='fake_uuid1', deleted=False) instance2 = objects.Instance(uuid='fake_uuid2', deleted=True) fake_instances = [instance1, instance2] self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_update_at_top') self.mox.StubOutWithMock(self.tgt_msg_runner, 'instance_destroy_at_top') self.mox.StubOutWithMock(timeutils, 'parse_isotime') self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync') # Middle cell. timeutils.parse_isotime(updated_since_raw).AndReturn( updated_since_parsed) cells_utils.get_instances_to_sync(self.ctxt, updated_since=updated_since_parsed, project_id=project_id, deleted=deleted).AndReturn([]) # Bottom/Target cell timeutils.parse_isotime(updated_since_raw).AndReturn( updated_since_parsed) cells_utils.get_instances_to_sync(self.ctxt, updated_since=updated_since_parsed, project_id=project_id, deleted=deleted).AndReturn(fake_instances) self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1) self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2) self.mox.ReplayAll() self.src_msg_runner.sync_instances(self.ctxt, project_id, updated_since_raw, deleted) def test_service_get_all_with_disabled(self): # Reset this, as this is a broadcast down. 
self._setup_attrs(up=False) ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ServiceList, 'get_all') # Calls are made from grandchild-cell to api-cell objects.ServiceList.get_all( mox.IgnoreArg(), disabled=None).AndReturn([4, 5]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=None).AndReturn([3]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=None).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.service_get_all(ctxt, filters={}) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_service_get_all_without_disabled(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) disabled = False filters = {'disabled': disabled} ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ServiceList, 'get_all') # Calls are made from grandchild-cell to api-cell objects.ServiceList.get_all( mox.IgnoreArg(), disabled=disabled).AndReturn([4, 5]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=disabled).AndReturn([3]) objects.ServiceList.get_all( mox.IgnoreArg(), disabled=disabled).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.service_get_all(ctxt, filters=filters) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_task_log_get_all_broadcast(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) task_name = 'fake_task_name' begin = 'fake_begin' end = 'fake_end' host = 'fake_host' state = 'fake_state' ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all') self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all') self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all') self.src_db_inst.task_log_get_all(ctxt, task_name, begin, end, host=host, state=state).AndReturn([1, 2]) self.mid_db_inst.task_log_get_all(ctxt, task_name, begin, end, host=host, state=state).AndReturn([3]) self.tgt_db_inst.task_log_get_all(ctxt, task_name, begin, end, host=host, state=state).AndReturn([4, 5]) self.mox.ReplayAll() responses = self.src_msg_runner.task_log_get_all(ctxt, None, task_name, begin, end, host=host, state=state) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_compute_node_get_all(self): # Reset this, as this is a broadcast down. 
self._setup_attrs(up=False) ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') # Calls are made from grandchild-cell to api-cell objects.ComputeNodeList.get_all(mox.IgnoreArg()).AndReturn([4, 5]) objects.ComputeNodeList.get_all(mox.IgnoreArg()).AndReturn([3]) objects.ComputeNodeList.get_all(mox.IgnoreArg()).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.compute_node_get_all(ctxt) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_compute_node_get_all_with_hyp_match(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) hypervisor_match = 'meow' ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_by_hypervisor') # Calls are made from grandchild-cell to api-cell objects.ComputeNodeList.get_by_hypervisor( ctxt, hypervisor_match).AndReturn([4, 5]) objects.ComputeNodeList.get_by_hypervisor( ctxt, hypervisor_match).AndReturn([3]) objects.ComputeNodeList.get_by_hypervisor( ctxt, hypervisor_match).AndReturn([1, 2]) self.mox.ReplayAll() responses = self.src_msg_runner.compute_node_get_all(ctxt, hypervisor_match=hypervisor_match) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_compute_node_stats(self): # Reset this, as this is a broadcast down. self._setup_attrs(up=False) ctxt = self.ctxt.elevated() self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_statistics') self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_statistics') self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_statistics') self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2]) self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3]) self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5]) self.mox.ReplayAll() responses = self.src_msg_runner.compute_node_stats(ctxt) response_values = [(resp.cell_name, resp.value_or_raise()) for resp in responses] expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]), ('api-cell!child-cell2', [3]), ('api-cell', [1, 2])] self.assertEqual(expected, response_values) def test_consoleauth_delete_tokens(self): fake_uuid = 'fake-instance-uuid' # To show these should not be called in src/mid-level cell self.mox.StubOutWithMock(self.src_ca_rpcapi, 'delete_tokens_for_instance') self.mox.StubOutWithMock(self.mid_ca_rpcapi, 'delete_tokens_for_instance') self.mox.StubOutWithMock(self.tgt_ca_rpcapi, 'delete_tokens_for_instance') self.tgt_ca_rpcapi.delete_tokens_for_instance(self.ctxt, fake_uuid) self.mox.ReplayAll() self.src_msg_runner.consoleauth_delete_tokens(self.ctxt, fake_uuid) def test_bdm_update_or_create_with_none_create(self): fake_bdm = {'id': 'fake_id', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_update_or_create') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_update_or_create') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_update_or_create') self.tgt_db_inst.block_device_mapping_update_or_create( self.ctxt, expected_bdm, legacy=False) self.mox.ReplayAll() 
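# With create=None the target cell should fall back to the generic update-or-create DB call stubbed above; note the 'id' key is stripped from the BDM before the call.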
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=None) def test_bdm_update_or_create_with_true_create(self): fake_bdm = {'id': 'fake_id', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_create') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_create') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_create') self.tgt_db_inst.block_device_mapping_create( self.ctxt, fake_bdm, legacy=False) self.mox.ReplayAll() self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=True) def test_bdm_update_or_create_with_false_create_vol_id(self): fake_bdm = {'id': 'fake_id', 'instance_uuid': 'fake_instance_uuid', 'device_name': 'fake_device_name', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') fake_inst_bdms = [{'id': 1, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}, {'id': 2, 'volume_id': 'fake_volume_id', 'device_name': 'not-a-match'}, {'id': 3, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}] # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_update') self.tgt_db_inst.block_device_mapping_get_all_by_instance( self.ctxt, 'fake_instance_uuid').AndReturn( fake_inst_bdms) # Should try to update ID 2. self.tgt_db_inst.block_device_mapping_update( self.ctxt, 2, expected_bdm, legacy=False) self.mox.ReplayAll() self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=False) def test_bdm_update_or_create_with_false_create_dev_name(self): fake_bdm = {'id': 'fake_id', 'instance_uuid': 'fake_instance_uuid', 'device_name': 'fake_device_name', 'volume_id': 'fake_volume_id'} expected_bdm = fake_bdm.copy() expected_bdm.pop('id') fake_inst_bdms = [{'id': 1, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}, {'id': 2, 'volume_id': 'not-a-match', 'device_name': 'fake_device_name'}, {'id': 3, 'volume_id': 'not-a-match', 'device_name': 'not-a-match'}] # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_update') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_update') self.tgt_db_inst.block_device_mapping_get_all_by_instance( self.ctxt, 'fake_instance_uuid').AndReturn( fake_inst_bdms) # Should try to update ID 2. 
self.tgt_db_inst.block_device_mapping_update( self.ctxt, 2, expected_bdm, legacy=False) self.mox.ReplayAll() self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt, fake_bdm, create=False) def test_bdm_destroy_by_volume(self): fake_instance_uuid = 'fake-instance-uuid' fake_volume_id = 'fake-volume-name' # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_destroy_by_instance_and_volume') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_destroy_by_instance_and_volume') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_destroy_by_instance_and_volume') self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_volume( self.ctxt, fake_instance_uuid, fake_volume_id) self.mox.ReplayAll() self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid, volume_id=fake_volume_id) def test_bdm_destroy_by_device(self): fake_instance_uuid = 'fake-instance-uuid' fake_device_name = 'fake-device-name' # Shouldn't be called for these 2 cells self.mox.StubOutWithMock(self.src_db_inst, 'block_device_mapping_destroy_by_instance_and_device') self.mox.StubOutWithMock(self.mid_db_inst, 'block_device_mapping_destroy_by_instance_and_device') self.mox.StubOutWithMock(self.tgt_db_inst, 'block_device_mapping_destroy_by_instance_and_device') self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_device( self.ctxt, fake_instance_uuid, fake_device_name) self.mox.ReplayAll() self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid, device_name=fake_device_name) def test_get_migrations(self): self._setup_attrs(up=False) filters = {'status': 'confirmed'} migrations_from_cell1 = [{'id': 123}] migrations_from_cell2 = [{'id': 456}] self.mox.StubOutWithMock(self.mid_compute_api, 'get_migrations') self.mid_compute_api.get_migrations(self.ctxt, filters).\ AndReturn(migrations_from_cell1) self.mox.StubOutWithMock(self.tgt_compute_api, 'get_migrations') self.tgt_compute_api.get_migrations(self.ctxt, filters).\ AndReturn(migrations_from_cell2) self.mox.ReplayAll() responses = self.src_msg_runner.get_migrations( self.ctxt, None, False, filters) self.assertEqual(2, len(responses)) for response in responses: self.assertIn(response.value_or_raise(), [migrations_from_cell1, migrations_from_cell2]) @mock.patch.object(objects.KeyPair, 'get_by_name', return_value='fake_keypair') def test_get_keypair_at_top(self, fake_get_by_name): user_id = 'fake_user_id' name = 'fake_keypair_name' responses = self.src_msg_runner.get_keypair_at_top(self.ctxt, user_id, name) fake_get_by_name.assert_called_once_with(self.ctxt, user_id, name) for response in responses: if response.value is not None: self.assertEqual('fake_keypair', response.value) @mock.patch.object(objects.KeyPair, 'get_by_name') def test_get_keypair_at_top_with_objects_exception(self, fake_get_by_name): user_id = 'fake_user_id' name = 'fake_keypair_name' keypair_exception = exception.KeypairNotFound(user_id=user_id, name=name) fake_get_by_name.side_effect = keypair_exception responses = self.src_msg_runner.get_keypair_at_top(self.ctxt, user_id, name) fake_get_by_name.assert_called_once_with(self.ctxt, user_id, name) for response in responses: self.assertIsNone(response.value) @mock.patch.object(messaging._BroadcastMessage, 'process') def test_get_keypair_at_top_with_process_response(self, fake_process): user_id = 'fake_user_id' name = 'fake_keypair_name' response = messaging.Response(self.ctxt, 'cell', 'keypair', False) other_response = messaging.Response(self.ctxt, 'cell', 
'fake_other_keypair', False) fake_process.return_value = [response, other_response] responses = self.src_msg_runner.get_keypair_at_top(self.ctxt, user_id, name) fake_process.assert_called_once_with() self.assertEqual(fake_process.return_value, responses) class CellsPublicInterfacesTestCase(test.NoDBTestCase): """Test case for the public interfaces into cells messaging.""" def setUp(self): super(CellsPublicInterfacesTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self.our_name = 'api-cell' self.msg_runner = fakes.get_message_runner(self.our_name) self.state_manager = self.msg_runner.state_manager @mock.patch.object(messaging, '_TargetedMessage') def test_resize_instance(self, mock_message): instance = objects.Instance(cell_name='api-cell!child-cell') flavor = 'fake' extra_instance_updates = {'fake': 'fake'} clean_shutdown = True self.msg_runner.resize_instance(self.ctxt, instance, flavor, extra_instance_updates, clean_shutdown=clean_shutdown) extra_kwargs = dict(flavor=flavor, extra_instance_updates=extra_instance_updates, clean_shutdown=clean_shutdown) method_kwargs = {'instance': instance} method_kwargs.update(extra_kwargs) mock_message.assert_called_once_with(self.msg_runner, self.ctxt, 'resize_instance', method_kwargs, 'down', instance.cell_name, need_response=False) nova-13.1.4/nova/tests/unit/cells/test_cells_weights.py0000664000567000056710000002145313064447152024352 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for testing the cells weight algorithms. Cells with higher weights should be given priority for new builds. 
""" import datetime from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from nova.cells import state from nova.cells import weights from nova import test class FakeCellState(state.CellState): def __init__(self, cell_name): super(FakeCellState, self).__init__(cell_name) self.capacities['ram_free'] = {'total_mb': 0, 'units_by_mb': {}} self.db_info = {} def _update_ram_free(self, *args): ram_free = self.capacities['ram_free'] for ram_size, units in args: ram_free['total_mb'] += units * ram_size ram_free['units_by_mb'][str(ram_size)] = units def _get_fake_cells(): cell1 = FakeCellState('cell1') cell1._update_ram_free((512, 1), (1024, 4), (2048, 3)) cell1.db_info['weight_offset'] = -200.0 cell2 = FakeCellState('cell2') cell2._update_ram_free((512, 2), (1024, 3), (2048, 4)) cell2.db_info['weight_offset'] = -200.1 cell3 = FakeCellState('cell3') cell3._update_ram_free((512, 3), (1024, 2), (2048, 1)) cell3.db_info['weight_offset'] = 400.0 cell4 = FakeCellState('cell4') cell4._update_ram_free((512, 4), (1024, 1), (2048, 2)) cell4.db_info['weight_offset'] = 300.0 return [cell1, cell2, cell3, cell4] class CellsWeightsTestCase(test.NoDBTestCase): """Makes sure the proper weighers are in the directory.""" def test_all_weighers(self): weighers = weights.all_weighers() # Check at least a couple that we expect are there self.assertTrue(len(weighers) >= 2) class_names = [cls.__name__ for cls in weighers] self.assertIn('WeightOffsetWeigher', class_names) self.assertIn('RamByInstanceTypeWeigher', class_names) class _WeigherTestClass(test.NoDBTestCase): """Base class for testing individual weigher plugins.""" weigher_cls_name = None def setUp(self): super(_WeigherTestClass, self).setUp() self.weight_handler = weights.CellWeightHandler() weigher_classes = self.weight_handler.get_matching_classes( [self.weigher_cls_name]) self.weighers = [cls() for cls in weigher_classes] def _get_weighed_cells(self, cells, weight_properties): return self.weight_handler.get_weighed_objects(self.weighers, cells, weight_properties) class RAMByInstanceTypeWeigherTestClass(_WeigherTestClass): weigher_cls_name = ('nova.cells.weights.ram_by_instance_type.' 'RamByInstanceTypeWeigher') def test_default_spreading(self): """Test that cells with more ram available return a higher weight.""" cells = _get_fake_cells() # Simulate building a new 512MB instance. instance_type = {'memory_mb': 512} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[3], cells[2], cells[1], cells[0]] self.assertEqual(expected_cells, resulting_cells) # Simulate building a new 1024MB instance. instance_type = {'memory_mb': 1024} weight_properties = {'request_spec': {'instance_type': instance_type}} weighed_cells = self._get_weighed_cells(cells, weight_properties) self.assertEqual(4, len(weighed_cells)) resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] expected_cells = [cells[0], cells[1], cells[2], cells[3]] self.assertEqual(expected_cells, resulting_cells) # Simulate building a new 2048MB instance. 
        instance_type = {'memory_mb': 2048}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[1], cells[0], cells[3], cells[2]]
        self.assertEqual(expected_cells, resulting_cells)

    def test_negative_multiplier(self):
        """Test that cells with less ram available return a higher weight."""
        self.flags(ram_weight_multiplier=-1.0, group='cells')
        cells = _get_fake_cells()

        # Simulate building a new 512MB instance.
        instance_type = {'memory_mb': 512}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[0], cells[1], cells[2], cells[3]]
        self.assertEqual(expected_cells, resulting_cells)

        # Simulate building a new 1024MB instance.
        instance_type = {'memory_mb': 1024}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[3], cells[2], cells[1], cells[0]]
        self.assertEqual(expected_cells, resulting_cells)

        # Simulate building a new 2048MB instance.
        instance_type = {'memory_mb': 2048}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[2], cells[3], cells[0], cells[1]]
        self.assertEqual(expected_cells, resulting_cells)


class WeightOffsetWeigherTestClass(_WeigherTestClass):
    """Test the WeightOffsetWeigher class."""
    weigher_cls_name = 'nova.cells.weights.weight_offset.WeightOffsetWeigher'

    def test_weight_offset(self):
        """Test that cells with higher weight_offsets return higher weights.
""" cells = _get_fake_cells() weighed_cells = self._get_weighed_cells(cells, {}) self.assertEqual(4, len(weighed_cells)) expected_cells = [cells[2], cells[3], cells[0], cells[1]] resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells] self.assertEqual(expected_cells, resulting_cells) class MuteWeigherTestClass(_WeigherTestClass): weigher_cls_name = 'nova.cells.weights.mute_child.MuteChildWeigher' def setUp(self): super(MuteWeigherTestClass, self).setUp() self.flags(mute_weight_multiplier=-10.0, mute_child_interval=100, group='cells') self.now = timeutils.utcnow() self.useFixture(utils_fixture.TimeFixture(self.now)) self.cells = _get_fake_cells() for cell in self.cells: cell.last_seen = self.now def test_non_mute(self): weight_properties = {} weighed_cells = self._get_weighed_cells(self.cells, weight_properties) self.assertEqual(4, len(weighed_cells)) for weighed_cell in weighed_cells: self.assertEqual(0, weighed_cell.weight) def test_mutes(self): # make 2 of them mute: self.cells[0].last_seen = (self.cells[0].last_seen - datetime.timedelta(seconds=200)) self.cells[1].last_seen = (self.cells[1].last_seen - datetime.timedelta(seconds=200)) weight_properties = {} weighed_cells = self._get_weighed_cells(self.cells, weight_properties) self.assertEqual(4, len(weighed_cells)) for i in range(2): weighed_cell = weighed_cells.pop(0) self.assertEqual(0, weighed_cell.weight) self.assertIn(weighed_cell.obj.name, ['cell3', 'cell4']) for i in range(2): weighed_cell = weighed_cells.pop(0) self.assertEqual(-10.0, weighed_cell.weight) self.assertIn(weighed_cell.obj.name, ['cell1', 'cell2']) nova-13.1.4/nova/tests/unit/cells/test_cells_rpcapi.py0000664000567000056710000010574213064447152024162 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Cells RPCAPI """ import six from nova.cells import rpcapi as cells_rpcapi import nova.conf from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance CONF = nova.conf.CONF class CellsAPITestCase(test.NoDBTestCase): """Test case for cells.api interfaces.""" def setUp(self): super(CellsAPITestCase, self).setUp() self.fake_topic = 'fake_topic' self.fake_context = 'fake_context' self.flags(topic=self.fake_topic, enable=True, group='cells') self.cells_rpcapi = cells_rpcapi.CellsAPI() def _stub_rpc_method(self, rpc_method, result): call_info = {} orig_prepare = self.cells_rpcapi.client.prepare def fake_rpc_prepare(**kwargs): if 'version' in kwargs: call_info['version'] = kwargs.pop('version') return self.cells_rpcapi.client def fake_csv(version): return orig_prepare(version).can_send_version() def fake_rpc_method(ctxt, method, **kwargs): call_info['context'] = ctxt call_info['method'] = method call_info['args'] = kwargs return result self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare) self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv) self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method) return call_info def _check_result(self, call_info, method, args, version=None): self.assertEqual(self.fake_topic, self.cells_rpcapi.client.target.topic) self.assertEqual(self.fake_context, call_info['context']) self.assertEqual(method, call_info['method']) self.assertEqual(args, call_info['args']) if version is not None: self.assertIn('version', call_info) self.assertIsInstance(call_info['version'], six.string_types, msg="Message version %s is not a string" % call_info['version']) self.assertEqual(version, call_info['version']) else: self.assertNotIn('version', call_info) def test_cast_compute_api_method(self): fake_cell_name = 'fake_cell_name' fake_method = 'fake_method' fake_method_args = (1, 2) fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20} expected_method_info = {'method': fake_method, 'method_args': fake_method_args, 'method_kwargs': fake_method_kwargs} expected_args = {'method_info': expected_method_info, 'cell_name': fake_cell_name, 'call': False} call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.cast_compute_api_method(self.fake_context, fake_cell_name, fake_method, *fake_method_args, **fake_method_kwargs) self._check_result(call_info, 'run_compute_api_method', expected_args) def test_call_compute_api_method(self): fake_cell_name = 'fake_cell_name' fake_method = 'fake_method' fake_method_args = (1, 2) fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20} fake_response = 'fake_response' expected_method_info = {'method': fake_method, 'method_args': fake_method_args, 'method_kwargs': fake_method_kwargs} expected_args = {'method_info': expected_method_info, 'cell_name': fake_cell_name, 'call': True} call_info = self._stub_rpc_method('call', fake_response) result = self.cells_rpcapi.call_compute_api_method(self.fake_context, fake_cell_name, fake_method, *fake_method_args, **fake_method_kwargs) self._check_result(call_info, 'run_compute_api_method', expected_args) self.assertEqual(fake_response, result) def test_build_instances(self): call_info = self._stub_rpc_method('cast', None) instances = [objects.Instance(id=1), objects.Instance(id=2)] self.cells_rpcapi.build_instances( self.fake_context, instances=instances, image={'fake': 'image'}, arg1=1, arg2=2, arg3=3) expected_args = {'build_inst_kwargs': {'instances': instances, 'image': {'fake': 'image'}, 'arg1': 1, 'arg2': 
2, 'arg3': 3}} self._check_result(call_info, 'build_instances', expected_args, version='1.34') def test_get_capacities(self): capacity_info = {"capacity": "info"} call_info = self._stub_rpc_method('call', result=capacity_info) result = self.cells_rpcapi.get_capacities(self.fake_context, cell_name="name") self._check_result(call_info, 'get_capacities', {'cell_name': 'name'}, version='1.9') self.assertEqual(capacity_info, result) def test_instance_update_at_top(self): fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid') fake_sys_metadata = {'key1': 'value1', 'key2': 'value2'} fake_attrs = {'id': 2, 'cell_name': 'fake', 'metadata': {'fake': 'fake'}, 'info_cache': fake_info_cache, 'system_metadata': fake_sys_metadata} fake_instance = objects.Instance(**fake_attrs) call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_update_at_top( self.fake_context, fake_instance) expected_args = {'instance': fake_instance} self._check_result(call_info, 'instance_update_at_top', expected_args, version='1.35') def test_instance_destroy_at_top(self): fake_instance = objects.Instance(uuid='fake-uuid') call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_destroy_at_top( self.fake_context, fake_instance) expected_args = {'instance': fake_instance} self._check_result(call_info, 'instance_destroy_at_top', expected_args, version='1.35') def test_instance_delete_everywhere(self): instance = fake_instance.fake_instance_obj(self.fake_context) call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_delete_everywhere( self.fake_context, instance, 'fake-type') expected_args = {'instance': instance, 'delete_type': 'fake-type'} self._check_result(call_info, 'instance_delete_everywhere', expected_args, version='1.27') def test_instance_fault_create_at_top(self): fake_instance_fault = {'id': 2, 'other': 'meow'} call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_fault_create_at_top( self.fake_context, fake_instance_fault) expected_args = {'instance_fault': fake_instance_fault} self._check_result(call_info, 'instance_fault_create_at_top', expected_args) def test_bw_usage_update_at_top(self): update_args = ('fake_uuid', 'fake_mac', 'fake_start_period', 'fake_bw_in', 'fake_bw_out', 'fake_ctr_in', 'fake_ctr_out') update_kwargs = {'last_refreshed': 'fake_refreshed'} call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.bw_usage_update_at_top( self.fake_context, *update_args, **update_kwargs) bw_update_info = {'uuid': 'fake_uuid', 'mac': 'fake_mac', 'start_period': 'fake_start_period', 'bw_in': 'fake_bw_in', 'bw_out': 'fake_bw_out', 'last_ctr_in': 'fake_ctr_in', 'last_ctr_out': 'fake_ctr_out', 'last_refreshed': 'fake_refreshed'} expected_args = {'bw_update_info': bw_update_info} self._check_result(call_info, 'bw_usage_update_at_top', expected_args) def test_get_cell_info_for_neighbors(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.get_cell_info_for_neighbors( self.fake_context) self._check_result(call_info, 'get_cell_info_for_neighbors', {}, version='1.1') self.assertEqual('fake_response', result) def test_sync_instances(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.sync_instances(self.fake_context, project_id='fake_project', updated_since='fake_time', deleted=True) expected_args = {'project_id': 'fake_project', 'updated_since': 'fake_time', 'deleted': True} self._check_result(call_info, 'sync_instances', expected_args, version='1.1') def 
test_service_get_all(self): call_info = self._stub_rpc_method('call', 'fake_response') fake_filters = {'key1': 'val1', 'key2': 'val2'} result = self.cells_rpcapi.service_get_all(self.fake_context, filters=fake_filters) expected_args = {'filters': fake_filters} self._check_result(call_info, 'service_get_all', expected_args, version='1.2') self.assertEqual('fake_response', result) def test_service_get_by_compute_host(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.service_get_by_compute_host( self.fake_context, host_name='fake-host-name') expected_args = {'host_name': 'fake-host-name'} self._check_result(call_info, 'service_get_by_compute_host', expected_args, version='1.2') self.assertEqual('fake_response', result) def test_get_host_uptime(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.get_host_uptime( self.fake_context, host_name='fake-host-name') expected_args = {'host_name': 'fake-host-name'} self._check_result(call_info, 'get_host_uptime', expected_args, version='1.17') self.assertEqual('fake_response', result) def test_service_update(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.service_update( self.fake_context, host_name='fake-host-name', binary='nova-api', params_to_update={'disabled': True}) expected_args = { 'host_name': 'fake-host-name', 'binary': 'nova-api', 'params_to_update': {'disabled': True}} self._check_result(call_info, 'service_update', expected_args, version='1.7') self.assertEqual('fake_response', result) def test_service_delete(self): call_info = self._stub_rpc_method('call', None) cell_service_id = 'cell@id' result = self.cells_rpcapi.service_delete( self.fake_context, cell_service_id=cell_service_id) expected_args = {'cell_service_id': cell_service_id} self._check_result(call_info, 'service_delete', expected_args, version='1.26') self.assertIsNone(result) def test_proxy_rpc_to_manager(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.proxy_rpc_to_manager( self.fake_context, rpc_message='fake-msg', topic='fake-topic', call=True, timeout=-1) expected_args = {'rpc_message': 'fake-msg', 'topic': 'fake-topic', 'call': True, 'timeout': -1} self._check_result(call_info, 'proxy_rpc_to_manager', expected_args, version='1.2') self.assertEqual('fake_response', result) def test_task_log_get_all(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.task_log_get_all(self.fake_context, task_name='fake_name', period_beginning='fake_begin', period_ending='fake_end', host='fake_host', state='fake_state') expected_args = {'task_name': 'fake_name', 'period_beginning': 'fake_begin', 'period_ending': 'fake_end', 'host': 'fake_host', 'state': 'fake_state'} self._check_result(call_info, 'task_log_get_all', expected_args, version='1.3') self.assertEqual('fake_response', result) def test_compute_node_get_all(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.compute_node_get_all(self.fake_context, hypervisor_match='fake-match') expected_args = {'hypervisor_match': 'fake-match'} self._check_result(call_info, 'compute_node_get_all', expected_args, version='1.4') self.assertEqual('fake_response', result) def test_compute_node_stats(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.compute_node_stats(self.fake_context) expected_args = {} self._check_result(call_info, 'compute_node_stats', 
expected_args, version='1.4') self.assertEqual('fake_response', result) def test_compute_node_get(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.compute_node_get(self.fake_context, 'fake_compute_id') expected_args = {'compute_id': 'fake_compute_id'} self._check_result(call_info, 'compute_node_get', expected_args, version='1.4') self.assertEqual('fake_response', result) def test_actions_get(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'} call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.actions_get(self.fake_context, fake_instance) expected_args = {'cell_name': 'region!child', 'instance_uuid': fake_instance['uuid']} self._check_result(call_info, 'actions_get', expected_args, version='1.5') self.assertEqual('fake_response', result) def test_actions_get_no_cell(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': None} self.assertRaises(exception.InstanceUnknownCell, self.cells_rpcapi.actions_get, self.fake_context, fake_instance) def test_action_get_by_request_id(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'} call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.action_get_by_request_id(self.fake_context, fake_instance, 'req-fake') expected_args = {'cell_name': 'region!child', 'instance_uuid': fake_instance['uuid'], 'request_id': 'req-fake'} self._check_result(call_info, 'action_get_by_request_id', expected_args, version='1.5') self.assertEqual('fake_response', result) def test_action_get_by_request_id_no_cell(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': None} self.assertRaises(exception.InstanceUnknownCell, self.cells_rpcapi.action_get_by_request_id, self.fake_context, fake_instance, 'req-fake') def test_action_events_get(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'} call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.action_events_get(self.fake_context, fake_instance, 'fake-action') expected_args = {'cell_name': 'region!child', 'action_id': 'fake-action'} self._check_result(call_info, 'action_events_get', expected_args, version='1.5') self.assertEqual('fake_response', result) def test_action_events_get_no_cell(self): fake_instance = {'uuid': 'fake-uuid', 'cell_name': None} self.assertRaises(exception.InstanceUnknownCell, self.cells_rpcapi.action_events_get, self.fake_context, fake_instance, 'fake-action') def test_consoleauth_delete_tokens(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context, 'fake-uuid') expected_args = {'instance_uuid': 'fake-uuid'} self._check_result(call_info, 'consoleauth_delete_tokens', expected_args, version='1.6') def test_validate_console_port(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.validate_console_port(self.fake_context, 'fake-uuid', 'fake-port', 'fake-type') expected_args = {'instance_uuid': 'fake-uuid', 'console_port': 'fake-port', 'console_type': 'fake-type'} self._check_result(call_info, 'validate_console_port', expected_args, version='1.6') self.assertEqual('fake_response', result) def test_bdm_update_or_create_at_top(self): fake_bdm = {'id': 2, 'other': 'meow'} call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.bdm_update_or_create_at_top( self.fake_context, fake_bdm, create='fake-create') expected_args = {'bdm': fake_bdm, 'create': 'fake-create'} self._check_result(call_info, 
'bdm_update_or_create_at_top', expected_args, version='1.28') def test_bdm_destroy_at_top(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.bdm_destroy_at_top(self.fake_context, 'fake-uuid', device_name='fake-device', volume_id='fake-vol') expected_args = {'instance_uuid': 'fake-uuid', 'device_name': 'fake-device', 'volume_id': 'fake-vol'} self._check_result(call_info, 'bdm_destroy_at_top', expected_args, version='1.10') def test_get_migrations(self): call_info = self._stub_rpc_method('call', None) filters = {'cell_name': 'ChildCell', 'status': 'confirmed'} self.cells_rpcapi.get_migrations(self.fake_context, filters) expected_args = {'filters': filters} self._check_result(call_info, 'get_migrations', expected_args, version="1.11") def test_instance_update_from_api(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.instance_update_from_api( self.fake_context, 'fake-instance', expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset='admin_reset') expected_args = {'instance': 'fake-instance', 'expected_vm_state': 'exp_vm', 'expected_task_state': 'exp_task', 'admin_state_reset': 'admin_reset'} self._check_result(call_info, 'instance_update_from_api', expected_args, version='1.16') def test_start_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.start_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'start_instance', expected_args, version='1.12') def test_stop_instance_cast(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.stop_instance( self.fake_context, 'fake-instance', do_cast=True, clean_shutdown=True) expected_args = {'instance': 'fake-instance', 'do_cast': True, 'clean_shutdown': True} self._check_result(call_info, 'stop_instance', expected_args, version='1.31') def test_stop_instance_call(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.stop_instance( self.fake_context, 'fake-instance', do_cast=False, clean_shutdown=True) expected_args = {'instance': 'fake-instance', 'do_cast': False, 'clean_shutdown': True} self._check_result(call_info, 'stop_instance', expected_args, version='1.31') self.assertEqual('fake_response', result) def test_cell_create(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_create(self.fake_context, 'values') expected_args = {'values': 'values'} self._check_result(call_info, 'cell_create', expected_args, version='1.13') self.assertEqual('fake_response', result) def test_cell_update(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_update(self.fake_context, 'cell_name', 'values') expected_args = {'cell_name': 'cell_name', 'values': 'values'} self._check_result(call_info, 'cell_update', expected_args, version='1.13') self.assertEqual('fake_response', result) def test_cell_delete(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_delete(self.fake_context, 'cell_name') expected_args = {'cell_name': 'cell_name'} self._check_result(call_info, 'cell_delete', expected_args, version='1.13') self.assertEqual('fake_response', result) def test_cell_get(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.cell_get(self.fake_context, 'cell_name') expected_args = {'cell_name': 'cell_name'} self._check_result(call_info, 'cell_get', expected_args, version='1.13') 
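        # cell_get, like the other cell CRUD helpers exercised above
        # (cell_create/cell_update/cell_delete), is a synchronous 'call'
        # pinned at message version 1.13, so the stubbed response comes
        # back to the caller: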
self.assertEqual('fake_response', result) def test_reboot_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.reboot_instance( self.fake_context, 'fake-instance', block_device_info='ignored', reboot_type='HARD') expected_args = {'instance': 'fake-instance', 'reboot_type': 'HARD'} self._check_result(call_info, 'reboot_instance', expected_args, version='1.14') def test_pause_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.pause_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'pause_instance', expected_args, version='1.19') def test_unpause_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.unpause_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'unpause_instance', expected_args, version='1.19') def test_suspend_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.suspend_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'suspend_instance', expected_args, version='1.15') def test_resume_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.resume_instance( self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'resume_instance', expected_args, version='1.15') def test_terminate_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.terminate_instance(self.fake_context, 'fake-instance', [], delete_type='delete') expected_args = {'instance': 'fake-instance', 'delete_type': 'delete'} self._check_result(call_info, 'terminate_instance', expected_args, version='1.36') def test_soft_delete_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.soft_delete_instance(self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'soft_delete_instance', expected_args, version='1.18') def test_resize_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.resize_instance(self.fake_context, 'fake-instance', dict(cow='moo'), 'fake-hint', 'fake-flavor', 'fake-reservations', clean_shutdown=True) expected_args = {'instance': 'fake-instance', 'flavor': 'fake-flavor', 'extra_instance_updates': dict(cow='moo'), 'clean_shutdown': True} self._check_result(call_info, 'resize_instance', expected_args, version='1.33') def test_live_migrate_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.live_migrate_instance(self.fake_context, 'fake-instance', 'fake-host', 'fake-block', 'fake-commit') expected_args = {'instance': 'fake-instance', 'block_migration': 'fake-block', 'disk_over_commit': 'fake-commit', 'host_name': 'fake-host'} self._check_result(call_info, 'live_migrate_instance', expected_args, version='1.20') def test_live_migrate_instance_not_passing_request_spec(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.live_migrate_instance(self.fake_context, 'fake-instance', 'fake-host', 'fake-block', 'fake-commit', 'fake-spec') expected_args = {'instance': 'fake-instance', 'block_migration': 'fake-block', 'disk_over_commit': 'fake-commit', 'host_name': 'fake-host'} self._check_result(call_info, 'live_migrate_instance', expected_args, version='1.20') def test_rebuild_instance_not_passing_request_spec(self): call_info = 
self._stub_rpc_method('cast', None) self.cells_rpcapi.rebuild_instance(self.fake_context, 'fake-instance', 'fake-pass', 'fake-files', 'fake-image_ref', 'fake-orig_image_ref', 'fake-orig_sys_metadata', 'fake-bdms', recreate=False, on_shared_storage=False, host=None, preserve_ephemeral=False, request_spec='fake-spec', kwargs=None) expected_args = {'instance': 'fake-instance', 'image_href': 'fake-image_ref', 'admin_password': 'fake-pass', 'files_to_inject': 'fake-files', 'preserve_ephemeral': False, 'kwargs': None} self._check_result(call_info, 'rebuild_instance', expected_args, version='1.25') def test_revert_resize(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.revert_resize(self.fake_context, 'fake-instance', 'fake-migration', 'fake-dest', 'resvs') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'revert_resize', expected_args, version='1.21') def test_confirm_resize(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.confirm_resize(self.fake_context, 'fake-instance', 'fake-migration', 'fake-source', 'resvs') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'confirm_resize', expected_args, version='1.21') def test_reset_network(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.reset_network(self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'reset_network', expected_args, version='1.22') def test_inject_network_info(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.inject_network_info(self.fake_context, 'fake-instance') expected_args = {'instance': 'fake-instance'} self._check_result(call_info, 'inject_network_info', expected_args, version='1.23') def test_snapshot_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.snapshot_instance(self.fake_context, 'fake-instance', 'image-id') expected_args = {'instance': 'fake-instance', 'image_id': 'image-id'} self._check_result(call_info, 'snapshot_instance', expected_args, version='1.24') def test_backup_instance(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.backup_instance(self.fake_context, 'fake-instance', 'image-id', 'backup-type', 'rotation') expected_args = {'instance': 'fake-instance', 'image_id': 'image-id', 'backup_type': 'backup-type', 'rotation': 'rotation'} self._check_result(call_info, 'backup_instance', expected_args, version='1.24') def test_set_admin_password(self): call_info = self._stub_rpc_method('cast', None) self.cells_rpcapi.set_admin_password(self.fake_context, 'fake-instance', 'fake-password') expected_args = {'instance': 'fake-instance', 'new_pass': 'fake-password'} self._check_result(call_info, 'set_admin_password', expected_args, version='1.29') def test_get_keypair_at_top(self): call_info = self._stub_rpc_method('call', 'fake_response') result = self.cells_rpcapi.get_keypair_at_top(self.fake_context, 'fake_user_id', 'fake_name') expected_args = {'user_id': 'fake_user_id', 'name': 'fake_name'} self._check_result(call_info, 'get_keypair_at_top', expected_args, version='1.37') self.assertEqual(result, 'fake_response') def test_get_keypair_at_top_with_not_found(self): call_info = self._stub_rpc_method('call', None) self.assertRaises(exception.KeypairNotFound, self.cells_rpcapi.get_keypair_at_top, self.fake_context, 'fake_user_id', 'fake_name') expected_args = {'user_id': 'fake_user_id', 'name': 'fake_name'} self._check_result(call_info, 'get_keypair_at_top', 
expected_args, version='1.37') nova-13.1.4/nova/tests/unit/cells/test_cells_manager.py0000664000567000056710000012371413064447152024315 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CellsManager """ import copy import datetime import mock from oslo_utils import timeutils from six.moves import range from nova.cells import messaging from nova.cells import utils as cells_utils import nova.conf from nova import context from nova import objects from nova import test from nova.tests.unit.cells import fakes from nova.tests.unit import fake_instance from nova.tests.unit import fake_server_actions from nova.tests.unit.objects import test_flavor CONF = nova.conf.CONF CONF.import_opt('compute_topic', 'nova.compute.rpcapi') FAKE_COMPUTE_NODES = [dict(id=1, host='host1'), dict(id=2, host='host2')] FAKE_SERVICES = [dict(id=1, host='host1'), dict(id=2, host='host2'), dict(id=3, host='host3')] FAKE_TASK_LOGS = [dict(id=1, host='host1'), dict(id=2, host='host2')] class CellsManagerClassTestCase(test.NoDBTestCase): """Test case for CellsManager class.""" def setUp(self): super(CellsManagerClassTestCase, self).setUp() fakes.init(self) # pick a child cell to use for tests. 
self.our_cell = 'grandchild-cell1' self.cells_manager = fakes.get_cells_manager(self.our_cell) self.msg_runner = self.cells_manager.msg_runner self.state_manager = fakes.get_state_manager(self.our_cell) self.driver = self.cells_manager.driver self.ctxt = 'fake_context' def _get_fake_response(self, raw_response=None, exc=False): if exc: return messaging.Response(self.ctxt, 'fake', test.TestingException(), True) if raw_response is None: raw_response = 'fake-response' return messaging.Response(self.ctxt, 'fake', raw_response, False) def test_get_cell_info_for_neighbors(self): self.mox.StubOutWithMock(self.cells_manager.state_manager, 'get_cell_info_for_neighbors') self.cells_manager.state_manager.get_cell_info_for_neighbors() self.mox.ReplayAll() self.cells_manager.get_cell_info_for_neighbors(self.ctxt) def test_post_start_hook_child_cell(self): self.mox.StubOutWithMock(self.driver, 'start_servers') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents') self.driver.start_servers(self.msg_runner) context.get_admin_context().AndReturn(self.ctxt) self.cells_manager._update_our_parents(self.ctxt) self.mox.ReplayAll() self.cells_manager.post_start_hook() def test_post_start_hook_middle_cell(self): cells_manager = fakes.get_cells_manager('child-cell2') msg_runner = cells_manager.msg_runner driver = cells_manager.driver self.mox.StubOutWithMock(driver, 'start_servers') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(msg_runner, 'ask_children_for_capabilities') self.mox.StubOutWithMock(msg_runner, 'ask_children_for_capacities') driver.start_servers(msg_runner) context.get_admin_context().AndReturn(self.ctxt) msg_runner.ask_children_for_capabilities(self.ctxt) msg_runner.ask_children_for_capacities(self.ctxt) self.mox.ReplayAll() cells_manager.post_start_hook() def test_update_our_parents(self): self.mox.StubOutWithMock(self.msg_runner, 'tell_parents_our_capabilities') self.mox.StubOutWithMock(self.msg_runner, 'tell_parents_our_capacities') self.msg_runner.tell_parents_our_capabilities(self.ctxt) self.msg_runner.tell_parents_our_capacities(self.ctxt) self.mox.ReplayAll() self.cells_manager._update_our_parents(self.ctxt) def test_build_instances(self): build_inst_kwargs = {'instances': [objects.Instance(), objects.Instance()]} self.mox.StubOutWithMock(self.msg_runner, 'build_instances') our_cell = self.msg_runner.state_manager.get_my_state() self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs) self.mox.ReplayAll() self.cells_manager.build_instances(self.ctxt, build_inst_kwargs=build_inst_kwargs) def test_build_instances_old_flavor(self): flavor_dict = test_flavor.fake_flavor args = {'filter_properties': {'instance_type': flavor_dict}, 'instances': [objects.Instance()]} with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi: self.cells_manager.build_instances(self.ctxt, build_inst_kwargs=args) filter_properties = mock_bi.call_args[0][2]['filter_properties'] self.assertIsInstance(filter_properties['instance_type'], objects.Flavor) def test_build_instances_old_instances(self): args = {'instances': [fake_instance.fake_db_instance()]} with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi: self.cells_manager.build_instances(self.ctxt, build_inst_kwargs=args) self.assertIsInstance(mock_bi.call_args[0][2]['instances'][0], objects.Instance) def test_run_compute_api_method(self): # Args should just be silently passed through cell_name = 'fake-cell-name' method_info 
= 'fake-method-info' self.mox.StubOutWithMock(self.msg_runner, 'run_compute_api_method') fake_response = self._get_fake_response() self.msg_runner.run_compute_api_method(self.ctxt, cell_name, method_info, True).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.run_compute_api_method( self.ctxt, cell_name=cell_name, method_info=method_info, call=True) self.assertEqual('fake-response', response) def test_instance_update_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top') self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.instance_update_at_top(self.ctxt, instance='fake-instance') def test_instance_destroy_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top') self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.instance_destroy_at_top(self.ctxt, instance='fake-instance') def test_instance_delete_everywhere(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_delete_everywhere') self.msg_runner.instance_delete_everywhere(self.ctxt, 'fake-instance', 'fake-type') self.mox.ReplayAll() self.cells_manager.instance_delete_everywhere( self.ctxt, instance='fake-instance', delete_type='fake-type') def test_instance_fault_create_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_fault_create_at_top') self.msg_runner.instance_fault_create_at_top(self.ctxt, 'fake-fault') self.mox.ReplayAll() self.cells_manager.instance_fault_create_at_top( self.ctxt, instance_fault='fake-fault') def test_bw_usage_update_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bw_usage_update_at_top') self.msg_runner.bw_usage_update_at_top(self.ctxt, 'fake-bw-info') self.mox.ReplayAll() self.cells_manager.bw_usage_update_at_top( self.ctxt, bw_update_info='fake-bw-info') def test_heal_instances(self): self.flags(instance_updated_at_threshold=1000, instance_update_num_instances=2, group='cells') fake_context = context.RequestContext('fake', 'fake') stalled_time = timeutils.utcnow() updated_since = stalled_time - datetime.timedelta(seconds=1000) def utcnow(): return stalled_time call_info = {'get_instances': 0, 'sync_instances': []} instances = ['instance1', 'instance2', 'instance3'] def get_instances_to_sync(context, **kwargs): self.assertEqual(fake_context, context) call_info['shuffle'] = kwargs.get('shuffle') call_info['project_id'] = kwargs.get('project_id') call_info['updated_since'] = kwargs.get('updated_since') call_info['get_instances'] += 1 return iter(instances) @staticmethod def instance_get_by_uuid(context, uuid): return instances[int(uuid[-1]) - 1] def sync_instance(context, instance): self.assertEqual(fake_context, context) call_info['sync_instances'].append(instance) self.stubs.Set(cells_utils, 'get_instances_to_sync', get_instances_to_sync) self.stubs.Set(objects.Instance, 'get_by_uuid', instance_get_by_uuid) self.stubs.Set(self.cells_manager, '_sync_instance', sync_instance) self.stubs.Set(timeutils, 'utcnow', utcnow) self.cells_manager._heal_instances(fake_context) self.assertTrue(call_info['shuffle']) self.assertIsNone(call_info['project_id']) self.assertEqual(updated_since, call_info['updated_since']) self.assertEqual(1, call_info['get_instances']) # Only first 2 self.assertEqual(instances[:2], call_info['sync_instances']) call_info['sync_instances'] = [] self.cells_manager._heal_instances(fake_context) self.assertTrue(call_info['shuffle']) self.assertIsNone(call_info['project_id']) 
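        # Illustrative restatement (assuming the flags set at the top of
        # this test): the healing window is "now -
        # instance_updated_at_threshold seconds", so with a threshold of
        # 1000 the manager asks for instances updated since
        # stalled_time - 1000s.
        self.assertEqual(stalled_time - datetime.timedelta(seconds=1000),
                         call_info['updated_since'])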
self.assertEqual(updated_since, call_info['updated_since']) self.assertEqual(2, call_info['get_instances']) # Now the last 1 and the first 1 self.assertEqual([instances[-1], instances[0]], call_info['sync_instances']) def test_sync_instances(self): self.mox.StubOutWithMock(self.msg_runner, 'sync_instances') self.msg_runner.sync_instances(self.ctxt, 'fake-project', 'fake-time', 'fake-deleted') self.mox.ReplayAll() self.cells_manager.sync_instances(self.ctxt, project_id='fake-project', updated_since='fake-time', deleted='fake-deleted') def test_service_get_all(self): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of services. # Manager should turn these into a single list of responses. for i in range(3): cell_name = 'path!to!cell%i' % i services = [] for service in FAKE_SERVICES: fake_service = objects.Service(**service) services.append(fake_service) expected_service = cells_utils.ServiceProxy(fake_service, cell_name) expected_response.append( (cell_name, expected_service, fake_service)) response = messaging.Response(self.ctxt, cell_name, services, False) responses.append(response) self.mox.StubOutWithMock(self.msg_runner, 'service_get_all') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service') self.msg_runner.service_get_all(self.ctxt, 'fake-filters').AndReturn(responses) # Calls are done by cells, so we need to sort the list by the cell name expected_response.sort(key=lambda k: k[0]) for cell_name, service_proxy, service in expected_response: cells_utils.add_cell_to_service( service, cell_name).AndReturn(service_proxy) self.mox.ReplayAll() response = self.cells_manager.service_get_all(self.ctxt, filters='fake-filters') self.assertEqual([proxy for cell, proxy, service in expected_response], response) def test_service_get_by_compute_host(self): fake_cell = 'fake-cell' fake_service = objects.Service(**FAKE_SERVICES[0]) fake_response = messaging.Response(self.ctxt, fake_cell, fake_service, False) expected_response = cells_utils.ServiceProxy(fake_service, fake_cell) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') self.mox.StubOutWithMock(self.msg_runner, 'service_get_by_compute_host') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service') self.msg_runner.service_get_by_compute_host(self.ctxt, fake_cell, 'fake-host').AndReturn(fake_response) cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn( expected_response) self.mox.ReplayAll() response = self.cells_manager.service_get_by_compute_host(self.ctxt, host_name=cell_and_host) self.assertEqual(expected_response, response) def test_get_host_uptime(self): fake_cell = 'parent!fake-cell' fake_host = 'fake-host' fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host) host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:" " 0.20, 0.12, 0.14") fake_response = messaging.Response(self.ctxt, fake_cell, host_uptime, False) self.mox.StubOutWithMock(self.msg_runner, 'get_host_uptime') self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\ AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.get_host_uptime(self.ctxt, fake_cell_and_host) self.assertEqual(host_uptime, response) def test_service_update(self): fake_cell = 'fake-cell' fake_service = objects.Service(**FAKE_SERVICES[0]) fake_response = messaging.Response( self.ctxt, fake_cell, fake_service, False) expected_response = cells_utils.ServiceProxy(fake_service, fake_cell) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') 
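        # cells_utils.cell_with_item composes the "<cell>@<item>" form used
        # throughout these tests (compare the literal 'cell@id' in the
        # rpcapi tests), so cell_and_host identifies 'fake-host' inside
        # 'fake-cell'.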
params_to_update = {'disabled': True} self.mox.StubOutWithMock(self.msg_runner, 'service_update') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service') self.msg_runner.service_update(self.ctxt, fake_cell, 'fake-host', 'nova-api', params_to_update).AndReturn(fake_response) cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn( expected_response) self.mox.ReplayAll() response = self.cells_manager.service_update( self.ctxt, host_name=cell_and_host, binary='nova-api', params_to_update=params_to_update) self.assertEqual(expected_response, response) def test_service_delete(self): fake_cell = 'fake-cell' service_id = '1' cell_service_id = cells_utils.cell_with_item(fake_cell, service_id) with mock.patch.object(self.msg_runner, 'service_delete') as service_delete: self.cells_manager.service_delete(self.ctxt, cell_service_id) service_delete.assert_called_once_with( self.ctxt, fake_cell, service_id) def test_proxy_rpc_to_manager(self): self.mox.StubOutWithMock(self.msg_runner, 'proxy_rpc_to_manager') fake_response = self._get_fake_response() cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') topic = "%s.%s" % (CONF.compute_topic, cell_and_host) self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell', 'fake-host', topic, 'fake-rpc-msg', True, -1).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.proxy_rpc_to_manager(self.ctxt, topic=topic, rpc_message='fake-rpc-msg', call=True, timeout=-1) self.assertEqual('fake-response', response) def _build_task_log_responses(self, num): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of task log # entries. Manager should turn these into a single list of # task log entries. for i in range(num): cell_name = 'path!to!cell%i' % i task_logs = [] for task_log in FAKE_TASK_LOGS: task_logs.append(copy.deepcopy(task_log)) expected_task_log = copy.deepcopy(task_log) cells_utils.add_cell_to_task_log(expected_task_log, cell_name) expected_response.append(expected_task_log) response = messaging.Response(self.ctxt, cell_name, task_logs, False) responses.append(response) return expected_response, responses def test_task_log_get_all(self): expected_response, responses = self._build_task_log_responses(3) self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, None, 'fake-name', 'fake-begin', 'fake-end', host=None, state=None).AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end') self.assertEqual(expected_response, response) def test_task_log_get_all_with_filters(self): expected_response, responses = self._build_task_log_responses(1) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell', 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end', host=cell_and_host, state='fake-state') self.assertEqual(expected_response, response) def test_task_log_get_all_with_cell_but_no_host_filters(self): expected_response, responses = self._build_task_log_responses(1) # Host filter only has cell name. 
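        # With no '@host' suffix the host portion defaults to None, so the
        # expectation below targets every host in 'fake-cell' rather than a
        # single compute host.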
cell_and_host = 'fake-cell' self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell', 'fake-name', 'fake-begin', 'fake-end', host=None, state='fake-state').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end', host=cell_and_host, state='fake-state') self.assertEqual(expected_response, response) def test_compute_node_get_all(self): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of computes. # Manager should turn these into a single list of responses. for i in range(3): cell_name = 'path!to!cell%i' % i compute_nodes = [] for compute_node in FAKE_COMPUTE_NODES: fake_compute = objects.ComputeNode(**compute_node) fake_compute._cached_service = None compute_nodes.append(fake_compute) expected_compute_node = cells_utils.ComputeNodeProxy( fake_compute, cell_name) expected_response.append( (cell_name, expected_compute_node, fake_compute)) response = messaging.Response(self.ctxt, cell_name, compute_nodes, False) responses.append(response) self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get_all') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node') self.msg_runner.compute_node_get_all(self.ctxt, hypervisor_match='fake-match').AndReturn(responses) # Calls are done by cells, so we need to sort the list by the cell name expected_response.sort(key=lambda k: k[0]) for cell_name, compute_proxy, compute_node in expected_response: cells_utils.add_cell_to_compute_node( compute_node, cell_name).AndReturn(compute_proxy) self.mox.ReplayAll() response = self.cells_manager.compute_node_get_all(self.ctxt, hypervisor_match='fake-match') self.assertEqual([proxy for cell, proxy, compute in expected_response], response) def test_compute_node_stats(self): raw_resp1 = {'key1': 1, 'key2': 2} raw_resp2 = {'key2': 1, 'key3': 2} raw_resp3 = {'key3': 1, 'key4': 2} responses = [messaging.Response(self.ctxt, 'cell1', raw_resp1, False), messaging.Response(self.ctxt, 'cell2', raw_resp2, False), messaging.Response(self.ctxt, 'cell2', raw_resp3, False)] expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2} self.mox.StubOutWithMock(self.msg_runner, 'compute_node_stats') self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.compute_node_stats(self.ctxt) self.assertEqual(expected_resp, response) def test_compute_node_get(self): fake_cell = 'fake-cell' fake_compute = objects.ComputeNode(**FAKE_COMPUTE_NODES[0]) fake_compute._cached_service = None fake_response = messaging.Response(self.ctxt, fake_cell, fake_compute, False) expected_response = cells_utils.ComputeNodeProxy(fake_compute, fake_cell) cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id') self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get') self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node') self.msg_runner.compute_node_get(self.ctxt, 'fake-cell', 'fake-id').AndReturn(fake_response) cells_utils.add_cell_to_compute_node( fake_compute, fake_cell).AndReturn(expected_response) self.mox.ReplayAll() response = self.cells_manager.compute_node_get(self.ctxt, compute_id=cell_and_id) self.assertEqual(expected_response, response) def test_actions_get(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] fake_response = 
messaging.Response(self.ctxt, 'fake-cell', [fake_act], False) expected_response = [fake_act] self.mox.StubOutWithMock(self.msg_runner, 'actions_get') self.msg_runner.actions_get(self.ctxt, 'fake-cell', 'fake-uuid').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.actions_get(self.ctxt, 'fake-cell', 'fake-uuid') self.assertEqual(expected_response, response) def test_action_get_by_request_id(self): fake_uuid = fake_server_actions.FAKE_UUID fake_req_id = fake_server_actions.FAKE_REQUEST_ID1 fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_act, False) expected_response = fake_act self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id') self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell', 'fake-uuid', 'req-fake').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.action_get_by_request_id(self.ctxt, 'fake-cell', 'fake-uuid', 'req-fake') self.assertEqual(expected_response, response) def test_action_events_get(self): fake_action_id = fake_server_actions.FAKE_ACTION_ID1 fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id] fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_events, False) expected_response = fake_events self.mox.StubOutWithMock(self.msg_runner, 'action_events_get') self.msg_runner.action_events_get(self.ctxt, 'fake-cell', 'fake-action').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell', 'fake-action') self.assertEqual(expected_response, response) def test_consoleauth_delete_tokens(self): instance_uuid = 'fake-instance-uuid' self.mox.StubOutWithMock(self.msg_runner, 'consoleauth_delete_tokens') self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid) self.mox.ReplayAll() self.cells_manager.consoleauth_delete_tokens(self.ctxt, instance_uuid=instance_uuid) def test_get_capacities(self): cell_name = 'cell_name' response = {"ram_free": {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}} self.mox.StubOutWithMock(self.state_manager, 'get_capacities') self.state_manager.get_capacities(cell_name).AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.get_capacities(self.ctxt, cell_name)) def test_validate_console_port(self): instance_uuid = 'fake-instance-uuid' cell_name = 'fake-cell-name' instance = objects.Instance(cell_name=cell_name) console_port = 'fake-console-port' console_type = 'fake-console-type' self.mox.StubOutWithMock(self.msg_runner, 'validate_console_port') self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid') fake_response = self._get_fake_response() objects.Instance.get_by_uuid(self.ctxt, instance_uuid).AndReturn(instance) self.msg_runner.validate_console_port(self.ctxt, cell_name, instance_uuid, console_port, console_type).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.validate_console_port(self.ctxt, instance_uuid=instance_uuid, console_port=console_port, console_type=console_type) self.assertEqual('fake-response', response) def test_bdm_update_or_create_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bdm_update_or_create_at_top') self.msg_runner.bdm_update_or_create_at_top(self.ctxt, 'fake-bdm', create='foo') self.mox.ReplayAll() self.cells_manager.bdm_update_or_create_at_top(self.ctxt, 'fake-bdm', create='foo') def test_bdm_destroy_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top') 
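        # The manager is a thin proxy here: it forwards the instance uuid
        # plus whichever of device_name/volume_id identifies the mapping to
        # the message runner, which broadcasts the destroy up to the API
        # cell's database.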
self.msg_runner.bdm_destroy_at_top(self.ctxt, 'fake_instance_uuid', device_name='fake_device_name', volume_id='fake_volume_id') self.mox.ReplayAll() self.cells_manager.bdm_destroy_at_top(self.ctxt, 'fake_instance_uuid', device_name='fake_device_name', volume_id='fake_volume_id') def test_get_migrations(self): filters = {'status': 'confirmed'} cell1_migrations = [{'id': 123}] cell2_migrations = [{'id': 456}] fake_responses = [self._get_fake_response(cell1_migrations), self._get_fake_response(cell2_migrations)] self.mox.StubOutWithMock(self.msg_runner, 'get_migrations') self.msg_runner.get_migrations(self.ctxt, None, False, filters).\ AndReturn(fake_responses) self.mox.ReplayAll() response = self.cells_manager.get_migrations(self.ctxt, filters) self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response) def test_get_migrations_for_a_given_cell(self): filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'} target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name']) migrations = [{'id': 123}] fake_responses = [self._get_fake_response(migrations)] self.mox.StubOutWithMock(self.msg_runner, 'get_migrations') self.msg_runner.get_migrations(self.ctxt, target_cell, False, filters).AndReturn(fake_responses) self.mox.ReplayAll() response = self.cells_manager.get_migrations(self.ctxt, filters) self.assertEqual(migrations, response) def test_instance_update_from_api(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_update_from_api') self.msg_runner.instance_update_from_api(self.ctxt, 'fake-instance', 'exp_vm', 'exp_task', 'admin_reset') self.mox.ReplayAll() self.cells_manager.instance_update_from_api( self.ctxt, instance='fake-instance', expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset='admin_reset') def test_start_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'start_instance') self.msg_runner.start_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.start_instance(self.ctxt, instance='fake-instance') def test_stop_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'stop_instance') self.msg_runner.stop_instance(self.ctxt, 'fake-instance', do_cast='meow', clean_shutdown='purr') self.mox.ReplayAll() self.cells_manager.stop_instance(self.ctxt, instance='fake-instance', do_cast='meow', clean_shutdown='purr') def test_cell_create(self): values = 'values' response = 'created_cell' self.mox.StubOutWithMock(self.state_manager, 'cell_create') self.state_manager.cell_create(self.ctxt, values).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_create(self.ctxt, values)) def test_cell_update(self): cell_name = 'cell_name' values = 'values' response = 'updated_cell' self.mox.StubOutWithMock(self.state_manager, 'cell_update') self.state_manager.cell_update(self.ctxt, cell_name, values).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_update(self.ctxt, cell_name, values)) def test_cell_delete(self): cell_name = 'cell_name' response = 1 self.mox.StubOutWithMock(self.state_manager, 'cell_delete') self.state_manager.cell_delete(self.ctxt, cell_name).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_delete(self.ctxt, cell_name)) def test_cell_get(self): cell_name = 'cell_name' response = 'cell_info' self.mox.StubOutWithMock(self.state_manager, 'cell_get') self.state_manager.cell_get(self.ctxt, cell_name).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, 
self.cells_manager.cell_get(self.ctxt, cell_name)) def test_reboot_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance') self.msg_runner.reboot_instance(self.ctxt, 'fake-instance', 'HARD') self.mox.ReplayAll() self.cells_manager.reboot_instance(self.ctxt, instance='fake-instance', reboot_type='HARD') def test_suspend_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance') self.msg_runner.suspend_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.suspend_instance(self.ctxt, instance='fake-instance') def test_resume_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'resume_instance') self.msg_runner.resume_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.resume_instance(self.ctxt, instance='fake-instance') def test_terminate_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance') self.msg_runner.terminate_instance(self.ctxt, 'fake-instance', delete_type='delete') self.mox.ReplayAll() self.cells_manager.terminate_instance(self.ctxt, instance='fake-instance', delete_type='delete') def test_soft_delete_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance') self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.soft_delete_instance(self.ctxt, instance='fake-instance') def _test_resize_instance(self, clean_shutdown=True): self.mox.StubOutWithMock(self.msg_runner, 'resize_instance') self.msg_runner.resize_instance(self.ctxt, 'fake-instance', 'fake-flavor', 'fake-updates', clean_shutdown=clean_shutdown) self.mox.ReplayAll() self.cells_manager.resize_instance( self.ctxt, instance='fake-instance', flavor='fake-flavor', extra_instance_updates='fake-updates', clean_shutdown=clean_shutdown) def test_resize_instance(self): self._test_resize_instance() def test_resize_instance_forced_shutdown(self): self._test_resize_instance(clean_shutdown=False) def test_live_migrate_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance') self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance', 'fake-block', 'fake-commit', 'fake-host') self.mox.ReplayAll() self.cells_manager.live_migrate_instance( self.ctxt, instance='fake-instance', block_migration='fake-block', disk_over_commit='fake-commit', host_name='fake-host') def test_revert_resize(self): self.mox.StubOutWithMock(self.msg_runner, 'revert_resize') self.msg_runner.revert_resize(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.revert_resize(self.ctxt, instance='fake-instance') def test_confirm_resize(self): self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize') self.msg_runner.confirm_resize(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance') def test_reset_network(self): self.mox.StubOutWithMock(self.msg_runner, 'reset_network') self.msg_runner.reset_network(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.reset_network(self.ctxt, instance='fake-instance') def test_inject_network_info(self): self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info') self.msg_runner.inject_network_info(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.inject_network_info(self.ctxt, instance='fake-instance') def test_snapshot_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance') self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance', 'fake-id') self.mox.ReplayAll() 
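        # With the expectation replayed, the manager is expected to forward
        # the snapshot request for 'fake-instance'/'fake-id' to the message
        # runner unchanged.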
self.cells_manager.snapshot_instance(self.ctxt, instance='fake-instance', image_id='fake-id') def test_backup_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'backup_instance') self.msg_runner.backup_instance(self.ctxt, 'fake-instance', 'fake-id', 'backup-type', 'rotation') self.mox.ReplayAll() self.cells_manager.backup_instance(self.ctxt, instance='fake-instance', image_id='fake-id', backup_type='backup-type', rotation='rotation') def test_set_admin_password(self): with mock.patch.object(self.msg_runner, 'set_admin_password') as set_admin_password: self.cells_manager.set_admin_password(self.ctxt, instance='fake-instance', new_pass='fake-password') set_admin_password.assert_called_once_with(self.ctxt, 'fake-instance', 'fake-password') def test_get_keypair_at_top(self): keypairs = [self._get_fake_response('fake_keypair'), self._get_fake_response('fake_keypair2')] with mock.patch.object(self.msg_runner, 'get_keypair_at_top', return_value=keypairs) as fake_get_keypair: response = self.cells_manager.get_keypair_at_top(self.ctxt, 'fake_user_id', 'fake_name') fake_get_keypair.assert_called_once_with(self.ctxt, 'fake_user_id', 'fake_name') self.assertEqual('fake_keypair', response) def test_get_keypair_at_top_with_empty_responses(self): with mock.patch.object(self.msg_runner, 'get_keypair_at_top', return_value=[]) as fake_get_keypair: self.assertIsNone( self.cells_manager.get_keypair_at_top(self.ctxt, 'fake_user_id', 'fake_name')) fake_get_keypair.assert_called_once_with(self.ctxt, 'fake_user_id', 'fake_name') nova-13.1.4/nova/tests/unit/cells/test_cells_state_manager.py0000664000567000056710000003343113064447152025511 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CellStateManager """ import datetime import time import mock from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import timeutils import six from nova.cells import state from nova.db.sqlalchemy import models from nova import exception from nova import objects from nova import test from nova import utils FAKE_COMPUTES = [ ('host1', 1024, 100, 0, 0), ('host2', 1024, 100, -1, -1), ('host3', 1024, 100, 1024, 100), ('host4', 1024, 100, 300, 30), ] FAKE_COMPUTES_N_TO_ONE = [ ('host1', 1024, 100, 0, 0), ('host1', 1024, 100, -1, -1), ('host2', 1024, 100, 1024, 100), ('host2', 1024, 100, 300, 30), ] FAKE_SERVICES = [ ('host1', 0), ('host2', 0), ('host3', 0), ('host4', 3600), ] # NOTE(alaski): It's important to have multiple types that end up having the # same memory and disk requirements. So two types need the same first value, # and two need the second and third values to add up to the same thing. 
FAKE_ITYPES = [ (0, 0, 0), (50, 12, 13), (50, 2, 4), (10, 20, 5), ] def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk): return objects.ComputeNode(host=host, memory_mb=total_mem, local_gb=total_disk, free_ram_mb=free_mem, free_disk_gb=free_disk) @classmethod def _fake_service_get_all_by_binary(cls, context, binary): def _node(host, total_mem, total_disk, free_mem, free_disk): now = timeutils.utcnow() return objects.Service(host=host, disabled=False, forced_down=False, last_seen_up=now) return [_node(*fake) for fake in FAKE_COMPUTES] @classmethod def _fake_service_get_all_by_binary_nodedown(cls, context, binary): def _service(host, noupdate_sec): now = timeutils.utcnow() last_seen = now - datetime.timedelta(seconds=noupdate_sec) return objects.Service(host=host, disabled=False, forced_down=False, last_seen_up=last_seen, binary=binary) return [_service(*fake) for fake in FAKE_SERVICES] @classmethod def _fake_compute_node_get_all(cls, context): return [_create_fake_node(*fake) for fake in FAKE_COMPUTES] @classmethod def _fake_compute_node_n_to_one_get_all(cls, context): return [_create_fake_node(*fake) for fake in FAKE_COMPUTES_N_TO_ONE] def _fake_cell_get_all(context): return [] def _fake_instance_type_all(context): def _type(mem, root, eph): return {'root_gb': root, 'ephemeral_gb': eph, 'memory_mb': mem} return [_type(*fake) for fake in FAKE_ITYPES] class TestCellsStateManager(test.NoDBTestCase): def setUp(self): super(TestCellsStateManager, self).setUp() self.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_get_all) self.stubs.Set(objects.ServiceList, 'get_by_binary', _fake_service_get_all_by_binary) self.stub_out('nova.db.flavor_get_all', _fake_instance_type_all) self.stub_out('nova.db.cell_get_all', _fake_cell_get_all) def test_cells_config_not_found(self): self.flags(cells_config='no_such_file_exists.conf', group='cells') e = self.assertRaises(cfg.ConfigFilesNotFoundError, state.CellStateManager) self.assertEqual(['no_such_file_exists.conf'], e.config_files) @mock.patch.object(cfg.ConfigOpts, 'find_file') @mock.patch.object(utils, 'read_cached_file') def test_filemanager_returned(self, mock_read_cached_file, mock_find_file): mock_find_file.return_value = "/etc/nova/cells.json" mock_read_cached_file.return_value = (False, six.StringIO('{}')) self.flags(cells_config='cells.json', group='cells') manager = state.CellStateManager() self.assertIsInstance(manager, state.CellStateManagerFile) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_create, None, None) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_update, None, None, None) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_delete, None, None) def test_dbmanager_returned(self): self.assertIsInstance(state.CellStateManager(), state.CellStateManagerDB) def test_capacity_no_reserve(self): # utilize entire cell cap = self._capacity(0.0) cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(max(0, compute[4]) for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = cell_free_ram // 50 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 5 # 4 on host 3, 1 on host4 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) def test_capacity_full_reserve(self): # 
reserve the entire cell. (utilize zero percent) cap = self._capacity(100.0) cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(max(0, compute[4]) for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) self.assertEqual(0, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)]) def test_capacity_part_reserve(self): # utilize half the cell's free capacity cap = self._capacity(50.0) cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(max(0, compute[4]) for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = 10 # 10 from host 3 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 2 # 2 on host 3 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) def _get_state_manager(self, reserve_percent=0.0): self.flags(reserve_percent=reserve_percent, group='cells') return state.CellStateManager() def _capacity(self, reserve_percent): state_manager = self._get_state_manager(reserve_percent) my_state = state_manager.get_my_state() return my_state.capacities class TestCellsStateManagerNToOne(TestCellsStateManager): def setUp(self): super(TestCellsStateManagerNToOne, self).setUp() self.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_n_to_one_get_all) def test_capacity_part_reserve(self): # utilize half the cell's free capacity cap = self._capacity(50.0) cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES_N_TO_ONE) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = (1024 * sum(max(0, compute[4]) for compute in FAKE_COMPUTES_N_TO_ONE)) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = 6 # 6 from host 2 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 1 # 1 on host 2 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) class TestCellsStateManagerNodeDown(test.NoDBTestCase): def setUp(self): super(TestCellsStateManagerNodeDown, self).setUp() self.stub_out('nova.objects.ComputeNodeList.get_all', _fake_compute_node_get_all) self.stub_out('nova.objects.ServiceList.get_by_binary', _fake_service_get_all_by_binary_nodedown) self.stub_out('nova.db.flavor_get_all', _fake_instance_type_all) self.stub_out('nova.db.cell_get_all', _fake_cell_get_all) def test_capacity_no_reserve_nodedown(self): cap = self._capacity(0.0) cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES[:-1]) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) free_disk = sum(max(0, compute[4]) for compute in FAKE_COMPUTES[:-1]) cell_free_disk = 1024 * free_disk self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) def _get_state_manager(self, reserve_percent=0.0): self.flags(reserve_percent=reserve_percent, group='cells') return state.CellStateManager() def _capacity(self, reserve_percent): state_manager = self._get_state_manager(reserve_percent) my_state = 
state_manager.get_my_state() return my_state.capacities class TestCellStateManagerException(test.NoDBTestCase): @mock.patch.object(time, 'sleep') def test_init_db_error(self, mock_sleep): class TestCellStateManagerDB(state.CellStateManagerDB): def __init__(self): self._cell_data_sync = mock.Mock() self._cell_data_sync.side_effect = [db_exc.DBError(), []] super(TestCellStateManagerDB, self).__init__() test = TestCellStateManagerDB() mock_sleep.assert_called_once_with(30) self.assertEqual(2, test._cell_data_sync.call_count) class TestCellsGetCapacity(TestCellsStateManager): def setUp(self): super(TestCellsGetCapacity, self).setUp() self.capacities = {"ram_free": 1234} self.state_manager = self._get_state_manager() cell = models.Cell(name="cell_name") other_cell = models.Cell(name="other_cell_name") cell.capacities = self.capacities other_cell.capacities = self.capacities self.stubs.Set(self.state_manager, 'child_cells', {"cell_name": cell, "other_cell_name": other_cell}) def test_get_cell_capacity_for_all_cells(self): self.stubs.Set(self.state_manager.my_cell_state, 'capacities', self.capacities) capacities = self.state_manager.get_capacities() self.assertEqual({"ram_free": 3702}, capacities) def test_get_cell_capacity_for_the_parent_cell(self): self.stubs.Set(self.state_manager.my_cell_state, 'capacities', self.capacities) capacities = self.state_manager.\ get_capacities(self.state_manager.my_cell_state.name) self.assertEqual({"ram_free": 3702}, capacities) def test_get_cell_capacity_for_a_cell(self): self.assertEqual(self.capacities, self.state_manager.get_capacities(cell_name="cell_name")) def test_get_cell_capacity_for_non_existing_cell(self): self.assertRaises(exception.CellNotFound, self.state_manager.get_capacities, cell_name="invalid_cell_name") class FakeCellStateManager(object): def __init__(self): self.called = [] def _cell_data_sync(self, force=False): self.called.append(('_cell_data_sync', force)) class TestSyncDecorators(test.NoDBTestCase): def test_sync_before(self): manager = FakeCellStateManager() def test(inst, *args, **kwargs): self.assertEqual(manager, inst) self.assertEqual((1, 2, 3), args) self.assertEqual(dict(a=4, b=5, c=6), kwargs) return 'result' wrapper = state.sync_before(test) result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6) self.assertEqual('result', result) self.assertEqual([('_cell_data_sync', False)], manager.called) def test_sync_after(self): manager = FakeCellStateManager() def test(inst, *args, **kwargs): self.assertEqual(manager, inst) self.assertEqual((1, 2, 3), args) self.assertEqual(dict(a=4, b=5, c=6), kwargs) return 'result' wrapper = state.sync_after(test) result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6) self.assertEqual('result', result) self.assertEqual([('_cell_data_sync', True)], manager.called) nova-13.1.4/nova/tests/unit/cells/fakes.py0000664000567000056710000001630313064447152021546 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Cells tests. 
""" from nova.cells import driver from nova.cells import manager as cells_manager from nova.cells import state as cells_state from nova.cells import utils as cells_utils import nova.conf import nova.db from nova.db import base from nova import exception from nova import objects CONF = nova.conf.CONF # Fake Cell Hierarchy FAKE_TOP_LEVEL_CELL_NAME = 'api-cell' FAKE_CELL_LAYOUT = [{'child-cell1': []}, {'child-cell2': [{'grandchild-cell1': []}]}, {'child-cell3': [{'grandchild-cell2': []}, {'grandchild-cell3': []}]}, {'child-cell4': []}] # build_cell_stub_infos() below will take the above layout and create # a fake view of the DB from the perspective of each of the cells. # For each cell, a CellStubInfo will be created with this info. CELL_NAME_TO_STUB_INFO = {} class FakeDBApi(object): """Cells uses a different DB in each cell. This means in order to stub out things differently per cell, I need to create a fake DBApi object that is instantiated by each fake cell. """ def __init__(self, cell_db_entries): self.cell_db_entries = cell_db_entries def __getattr__(self, key): return getattr(nova.db, key) def cell_get_all(self, ctxt): return self.cell_db_entries def instance_get_all_by_filters(self, ctxt, *args, **kwargs): return [] def instance_get_by_uuid(self, ctxt, instance_uuid): raise exception.InstanceNotFound(instance_id=instance_uuid) class FakeCellsDriver(driver.BaseCellsDriver): pass class FakeCellState(cells_state.CellState): def send_message(self, message): message_runner = get_message_runner(self.name) orig_ctxt = message.ctxt json_message = message.to_json() message = message_runner.message_from_json(json_message) # Restore this so we can use mox and verify same context message.ctxt = orig_ctxt message.process() class FakeCellStateManager(cells_state.CellStateManagerDB): def __init__(self, *args, **kwargs): super(FakeCellStateManager, self).__init__(*args, cell_state_cls=FakeCellState, **kwargs) class FakeCellsManager(cells_manager.CellsManager): def __init__(self, *args, **kwargs): super(FakeCellsManager, self).__init__(*args, cell_state_manager=FakeCellStateManager, **kwargs) class CellStubInfo(object): def __init__(self, test_case, cell_name, db_entries): self.test_case = test_case self.cell_name = cell_name self.db_entries = db_entries def fake_base_init(_self, *args, **kwargs): _self.db = FakeDBApi(db_entries) @staticmethod def _fake_compute_node_get_all(context): return [] @staticmethod def _fake_service_get_by_binary(context, binary): return [] test_case.stubs.Set(base.Base, '__init__', fake_base_init) test_case.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_get_all) test_case.stubs.Set(objects.ServiceList, 'get_by_binary', _fake_service_get_by_binary) self.cells_manager = FakeCellsManager() # Fix the cell name, as it normally uses CONF.cells.name msg_runner = self.cells_manager.msg_runner msg_runner.our_name = self.cell_name self.cells_manager.state_manager.my_cell_state.name = self.cell_name def _build_cell_transport_url(cur_db_id): username = 'username%s' % cur_db_id password = 'password%s' % cur_db_id hostname = 'rpc_host%s' % cur_db_id port = 3090 + cur_db_id virtual_host = 'rpc_vhost%s' % cur_db_id return 'rabbit://%s:%s@%s:%s/%s' % (username, password, hostname, port, virtual_host) def _build_cell_stub_info(test_case, our_name, parent_path, children): cell_db_entries = [] cur_db_id = 1 sep_char = cells_utils.PATH_CELL_SEP if parent_path: cell_db_entries.append( dict(id=cur_db_id, name=parent_path.split(sep_char)[-1], is_parent=True, 
transport_url=_build_cell_transport_url(cur_db_id))) cur_db_id += 1 our_path = parent_path + sep_char + our_name else: our_path = our_name for child in children: for child_name, grandchildren in child.items(): _build_cell_stub_info(test_case, child_name, our_path, grandchildren) cell_entry = dict(id=cur_db_id, name=child_name, transport_url=_build_cell_transport_url( cur_db_id), is_parent=False) cell_db_entries.append(cell_entry) cur_db_id += 1 stub_info = CellStubInfo(test_case, our_name, cell_db_entries) CELL_NAME_TO_STUB_INFO[our_name] = stub_info def _build_cell_stub_infos(test_case): _build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '', FAKE_CELL_LAYOUT) def init(test_case): global CELL_NAME_TO_STUB_INFO test_case.flags(driver='nova.tests.unit.cells.fakes.FakeCellsDriver', group='cells') CELL_NAME_TO_STUB_INFO = {} _build_cell_stub_infos(test_case) def _get_cell_stub_info(cell_name): return CELL_NAME_TO_STUB_INFO[cell_name] def get_state_manager(cell_name): return _get_cell_stub_info(cell_name).cells_manager.state_manager def get_cell_state(cur_cell_name, tgt_cell_name): state_manager = get_state_manager(cur_cell_name) cell = state_manager.child_cells.get(tgt_cell_name) if cell is None: cell = state_manager.parent_cells.get(tgt_cell_name) return cell def get_cells_manager(cell_name): return _get_cell_stub_info(cell_name).cells_manager def get_message_runner(cell_name): return _get_cell_stub_info(cell_name).cells_manager.msg_runner def stub_tgt_method(test_case, cell_name, method_name, method): msg_runner = get_message_runner(cell_name) tgt_msg_methods = msg_runner.methods_by_type['targeted'] setattr(tgt_msg_methods, method_name, method) def stub_bcast_method(test_case, cell_name, method_name, method): msg_runner = get_message_runner(cell_name) tgt_msg_methods = msg_runner.methods_by_type['broadcast'] setattr(tgt_msg_methods, method_name, method) def stub_bcast_methods(test_case, method_name, method): for cell_name in CELL_NAME_TO_STUB_INFO.keys(): stub_bcast_method(test_case, cell_name, method_name, method) nova-13.1.4/nova/tests/unit/cells/test_cells_scheduler.py0000664000567000056710000005671713064447152024671 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
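# The scheduler tests below drive the real CellsScheduler through the fake
# cell hierarchy and message runner constructed by
# nova.tests.unit.cells.fakes (see fakes.init() in setUp).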
""" Tests For CellsScheduler """ import copy import time import mock from oslo_utils import uuidutils from nova import block_device from nova.cells import filters from nova.cells import weights from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova import objects from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests.unit.cells import fakes from nova.tests.unit import fake_block_device from nova.tests import uuidsentinel from nova import utils CONF = nova.conf.CONF class FakeFilterClass1(filters.BaseCellFilter): pass class FakeFilterClass2(filters.BaseCellFilter): pass class FakeWeightClass1(weights.BaseCellWeigher): def _weigh_object(self, obj, weight_properties): pass class FakeWeightClass2(weights.BaseCellWeigher): def _weigh_object(self, obj, weight_properties): pass class CellsSchedulerTestCase(test.TestCase): """Test case for CellsScheduler class.""" def setUp(self): super(CellsSchedulerTestCase, self).setUp() self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[], group='cells') self._init_cells_scheduler() def _init_cells_scheduler(self): fakes.init(self) self.msg_runner = fakes.get_message_runner('api-cell') self.scheduler = self.msg_runner.scheduler self.state_manager = self.msg_runner.state_manager self.my_cell_state = self.state_manager.get_my_state() self.ctxt = context.RequestContext('fake', 'fake') instance_uuids = [] for x in range(3): instance_uuids.append(uuidutils.generate_uuid()) self.instance_uuids = instance_uuids self.instances = [objects.Instance(uuid=uuid, id=id) for id, uuid in enumerate(instance_uuids)] self.request_spec = { 'num_instances': len(instance_uuids), 'instance_properties': self.instances[0], 'instance_type': 'fake_type', 'image': 'fake_image'} self.build_inst_kwargs = { 'instances': self.instances, 'image': 'fake_image', 'filter_properties': {'instance_type': 'fake_type'}, 'security_groups': 'fake_sec_groups', 'block_device_mapping': 'fake_bdm'} def test_create_instances_here(self): # Just grab the first instance type inst_type = objects.Flavor.get_by_id(self.ctxt, 1) image = {'properties': {}} instance_uuids = self.instance_uuids instance_props = {'id': 'removed', 'security_groups': 'removed', 'info_cache': 'removed', 'name': 'instance-00000001', 'hostname': 'meow', 'display_name': 'moo', 'image_ref': 'fake_image_ref', 'user_id': self.ctxt.user_id, # Test these as lists 'metadata': {'moo': 'cow'}, 'system_metadata': {'meow': 'cat'}, 'flavor': inst_type, 'project_id': self.ctxt.project_id} call_info = {'uuids': []} block_device_mapping = [ objects.BlockDeviceMapping(context=self.ctxt, **fake_block_device.FakeDbBlockDeviceDict( block_device.create_image_bdm('fake_image_ref'), anon=True)) ] def _fake_instance_update_at_top(_ctxt, instance): call_info['uuids'].append(instance['uuid']) self.stubs.Set(self.msg_runner, 'instance_update_at_top', _fake_instance_update_at_top) self.scheduler._create_instances_here(self.ctxt, instance_uuids, instance_props, inst_type, image, ['default'], block_device_mapping) self.assertEqual(instance_uuids, call_info['uuids']) for count, instance_uuid in enumerate(instance_uuids): instance = db.instance_get_by_uuid(self.ctxt, instance_uuid) meta = utils.instance_meta(instance) self.assertEqual('cow', meta['moo']) sys_meta = utils.instance_sys_meta(instance) self.assertEqual('cat', sys_meta['meow']) self.assertEqual('meow', instance['hostname']) self.assertEqual('moo-%d' % (count + 1), 
instance['display_name']) self.assertEqual('fake_image_ref', instance['image_ref']) @mock.patch('nova.objects.Instance.update') def test_create_instances_here_pops_problematic_properties(self, mock_update): values = { 'uuid': uuidsentinel.instance, 'metadata': [], 'id': 1, 'name': 'foo', 'info_cache': 'bar', 'security_groups': 'not secure', 'flavor': 'chocolate', 'pci_requests': 'no thanks', 'ec2_ids': 'prime', } @mock.patch.object(self.scheduler.compute_api, 'create_db_entry_for_new_instance') def test(mock_create_db): self.scheduler._create_instances_here( self.ctxt, [uuidsentinel.instance], values, objects.Flavor(), 'foo', [], []) test() # NOTE(danms): Make sure that only the expected properties # are applied to the instance object. The complex ones that # would have been mangled over RPC should be removed. mock_update.assert_called_once_with( {'uuid': uuidsentinel.instance, 'metadata': {}}) def test_build_instances_selects_child_cell(self): # Make sure there's no capacity info so we're sure to # select a child cell our_cell_info = self.state_manager.get_my_state() our_cell_info.capacities = {} call_info = {'times': 0} orig_fn = self.msg_runner.build_instances def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs): # This gets called twice. Once for our running it # in this cell.. and then it'll get called when the # child cell is picked. So, first time.. just run it # like normal. if not call_info['times']: call_info['times'] += 1 return orig_fn(ctxt, target_cell, build_inst_kwargs) call_info['ctxt'] = ctxt call_info['target_cell'] = target_cell call_info['build_inst_kwargs'] = build_inst_kwargs def fake_build_request_spec(ctxt, image, instances): request_spec = { 'num_instances': len(instances), 'image': image} return request_spec self.stubs.Set(self.msg_runner, 'build_instances', msg_runner_build_instances) self.stubs.Set(scheduler_utils, 'build_request_spec', fake_build_request_spec) self.msg_runner.build_instances(self.ctxt, self.my_cell_state, self.build_inst_kwargs) self.assertEqual(self.ctxt, call_info['ctxt']) self.assertEqual(self.build_inst_kwargs, call_info['build_inst_kwargs']) child_cells = self.state_manager.get_child_cells() self.assertIn(call_info['target_cell'], child_cells) def test_build_instances_selects_current_cell(self): # Make sure there's no child cells so that we will be # selected self.state_manager.child_cells = {} call_info = {} build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs) def fake_create_instances_here(ctxt, instance_uuids, instance_properties, instance_type, image, security_groups, block_device_mapping): call_info['ctxt'] = ctxt call_info['instance_uuids'] = instance_uuids call_info['instance_properties'] = instance_properties call_info['instance_type'] = instance_type call_info['image'] = image call_info['security_groups'] = security_groups call_info['block_device_mapping'] = block_device_mapping return self.instances def fake_rpc_build_instances(ctxt, **build_inst_kwargs): call_info['build_inst_kwargs'] = build_inst_kwargs def fake_build_request_spec(ctxt, image, instances): request_spec = { 'num_instances': len(instances), 'image': image} return request_spec self.stubs.Set(self.scheduler, '_create_instances_here', fake_create_instances_here) self.stubs.Set(self.scheduler.compute_task_api, 'build_instances', fake_rpc_build_instances) self.stubs.Set(scheduler_utils, 'build_request_spec', fake_build_request_spec) self.msg_runner.build_instances(self.ctxt, self.my_cell_state, build_inst_kwargs) self.assertEqual(self.ctxt, 
call_info['ctxt']) self.assertEqual(self.instance_uuids, call_info['instance_uuids']) self.assertEqual(self.build_inst_kwargs['instances'][0]['id'], call_info['instance_properties']['id']) self.assertEqual( self.build_inst_kwargs['filter_properties']['instance_type'], call_info['instance_type']) self.assertEqual(self.build_inst_kwargs['image'], call_info['image']) self.assertEqual(self.build_inst_kwargs['security_groups'], call_info['security_groups']) self.assertEqual(self.build_inst_kwargs['block_device_mapping'], call_info['block_device_mapping']) self.assertEqual(build_inst_kwargs, call_info['build_inst_kwargs']) self.assertEqual(self.instance_uuids, call_info['instance_uuids']) def test_build_instances_retries_when_no_cells_avail(self): self.flags(scheduler_retries=7, group='cells') call_info = {'num_tries': 0, 'errored_uuids': []} def fake_grab_target_cells(filter_properties): call_info['num_tries'] += 1 raise exception.NoCellsAvailable() def fake_sleep(_secs): return def fake_instance_save(inst): self.assertEqual(vm_states.ERROR, inst.vm_state) call_info['errored_uuids'].append(inst.uuid) def fake_build_request_spec(ctxt, image, instances): request_spec = { 'num_instances': len(instances), 'image': image} return request_spec self.stubs.Set(self.scheduler, '_grab_target_cells', fake_grab_target_cells) self.stubs.Set(time, 'sleep', fake_sleep) self.stubs.Set(objects.Instance, 'save', fake_instance_save) self.stubs.Set(scheduler_utils, 'build_request_spec', fake_build_request_spec) self.msg_runner.build_instances(self.ctxt, self.my_cell_state, self.build_inst_kwargs) self.assertEqual(8, call_info['num_tries']) self.assertEqual(self.instance_uuids, call_info['errored_uuids']) def test_schedule_method_on_random_exception(self): self.flags(scheduler_retries=7, group='cells') instances = [objects.Instance(uuid=uuid) for uuid in self.instance_uuids] method_kwargs = { 'image': 'fake_image', 'instances': instances, 'filter_properties': {}} call_info = {'num_tries': 0, 'errored_uuids1': [], 'errored_uuids2': []} def fake_grab_target_cells(filter_properties): call_info['num_tries'] += 1 raise test.TestingException() def fake_instance_save(inst): self.assertEqual(vm_states.ERROR, inst.vm_state) call_info['errored_uuids1'].append(inst.uuid) def fake_instance_update_at_top(ctxt, instance): self.assertEqual(vm_states.ERROR, instance['vm_state']) call_info['errored_uuids2'].append(instance['uuid']) def fake_build_request_spec(ctxt, image, instances): request_spec = { 'num_instances': len(instances), 'image': image} return request_spec self.stubs.Set(self.scheduler, '_grab_target_cells', fake_grab_target_cells) self.stubs.Set(objects.Instance, 'save', fake_instance_save) self.stubs.Set(self.msg_runner, 'instance_update_at_top', fake_instance_update_at_top) self.stubs.Set(scheduler_utils, 'build_request_spec', fake_build_request_spec) self.msg_runner.build_instances(self.ctxt, self.my_cell_state, method_kwargs) # Shouldn't retry self.assertEqual(1, call_info['num_tries']) self.assertEqual(self.instance_uuids, call_info['errored_uuids1']) self.assertEqual(self.instance_uuids, call_info['errored_uuids2']) def test_filter_schedule_skipping(self): # if a filter handles scheduling, short circuit def _grab(filter_properties): return None self.stubs.Set(self.scheduler, '_grab_target_cells', _grab) def _test(self, *args): raise test.TestingException("shouldn't be called") try: self.scheduler._schedule_build_to_cells(None, None, None, _test, None) except test.TestingException: self.fail("Scheduling did not 
properly short circuit") def test_cells_filter_args_correct(self): # Re-init our fakes with some filters. our_path = 'nova.tests.unit.cells.test_cells_scheduler' cls_names = [our_path + '.' + 'FakeFilterClass1', our_path + '.' + 'FakeFilterClass2'] self.flags(scheduler_filter_classes=cls_names, group='cells') self._init_cells_scheduler() # Make sure there's no child cells so that we will be # selected. Makes stubbing easier. self.state_manager.child_cells = {} call_info = {} def fake_create_instances_here(ctxt, instance_uuids, instance_properties, instance_type, image, security_groups, block_device_mapping): call_info['ctxt'] = ctxt call_info['instance_uuids'] = instance_uuids call_info['instance_properties'] = instance_properties call_info['instance_type'] = instance_type call_info['image'] = image call_info['security_groups'] = security_groups call_info['block_device_mapping'] = block_device_mapping def fake_rpc_build_instances(ctxt, **host_sched_kwargs): call_info['host_sched_kwargs'] = host_sched_kwargs def fake_get_filtered_objs(filters, cells, filt_properties): call_info['filt_objects'] = filters call_info['filt_cells'] = cells call_info['filt_props'] = filt_properties return cells def fake_build_request_spec(ctxt, image, instances): request_spec = { 'num_instances': len(instances), 'instance_properties': instances[0], 'image': image, 'instance_type': 'fake_type'} return request_spec self.stubs.Set(self.scheduler, '_create_instances_here', fake_create_instances_here) self.stubs.Set(self.scheduler.compute_task_api, 'build_instances', fake_rpc_build_instances) self.stubs.Set(scheduler_utils, 'build_request_spec', fake_build_request_spec) filter_handler = self.scheduler.filter_handler self.stubs.Set(filter_handler, 'get_filtered_objects', fake_get_filtered_objs) host_sched_kwargs = {'image': 'fake_image', 'instances': self.instances, 'filter_properties': {'instance_type': 'fake_type'}, 'security_groups': 'fake_sec_groups', 'block_device_mapping': 'fake_bdm'} self.msg_runner.build_instances(self.ctxt, self.my_cell_state, host_sched_kwargs) # Our cell was selected. self.assertEqual(self.ctxt, call_info['ctxt']) self.assertEqual(self.instance_uuids, call_info['instance_uuids']) self.assertEqual(self.request_spec['instance_properties']['id'], call_info['instance_properties']['id']) self.assertEqual(self.request_spec['instance_type'], call_info['instance_type']) self.assertEqual(self.request_spec['image'], call_info['image']) self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs']) # Filter args are correct expected_filt_props = {'context': self.ctxt, 'scheduler': self.scheduler, 'routing_path': self.my_cell_state.name, 'host_sched_kwargs': host_sched_kwargs, 'request_spec': self.request_spec, 'instance_type': 'fake_type'} self.assertEqual(expected_filt_props, call_info['filt_props']) self.assertEqual([FakeFilterClass1, FakeFilterClass2], [obj.__class__ for obj in call_info['filt_objects']]) self.assertEqual([self.my_cell_state], call_info['filt_cells']) def test_cells_filter_returning_none(self): # Re-init our fakes with some filters. our_path = 'nova.tests.unit.cells.test_cells_scheduler' cls_names = [our_path + '.' + 'FakeFilterClass1', our_path + '.' + 'FakeFilterClass2'] self.flags(scheduler_filter_classes=cls_names, group='cells') self._init_cells_scheduler() # Make sure there's no child cells so that we will be # selected. Makes stubbing easier. 
self.state_manager.child_cells = {} call_info = {'scheduled': False} def fake_create_instances_here(ctxt, request_spec): # Should not be called call_info['scheduled'] = True def fake_get_filtered_objs(filter_classes, cells, filt_properties): # Should cause scheduling to be skipped. Means that the # filter did it. return None self.stubs.Set(self.scheduler, '_create_instances_here', fake_create_instances_here) filter_handler = self.scheduler.filter_handler self.stubs.Set(filter_handler, 'get_filtered_objects', fake_get_filtered_objs) self.msg_runner.build_instances(self.ctxt, self.my_cell_state, {}) self.assertFalse(call_info['scheduled']) def test_cells_weight_args_correct(self): # Re-init our fakes with some filters. our_path = 'nova.tests.unit.cells.test_cells_scheduler' cls_names = [our_path + '.' + 'FakeWeightClass1', our_path + '.' + 'FakeWeightClass2'] self.flags(scheduler_weight_classes=cls_names, group='cells') self._init_cells_scheduler() # Make sure there's no child cells so that we will be # selected. Makes stubbing easier. self.state_manager.child_cells = {} call_info = {} def fake_create_instances_here(ctxt, instance_uuids, instance_properties, instance_type, image, security_groups, block_device_mapping): call_info['ctxt'] = ctxt call_info['instance_uuids'] = instance_uuids call_info['instance_properties'] = instance_properties call_info['instance_type'] = instance_type call_info['image'] = image call_info['security_groups'] = security_groups call_info['block_device_mapping'] = block_device_mapping def fake_rpc_build_instances(ctxt, **host_sched_kwargs): call_info['host_sched_kwargs'] = host_sched_kwargs def fake_get_weighed_objs(weighers, cells, filt_properties): call_info['weighers'] = weighers call_info['weight_cells'] = cells call_info['weight_props'] = filt_properties return [weights.WeightedCell(cells[0], 0.0)] def fake_build_request_spec(ctxt, image, instances): request_spec = { 'num_instances': len(instances), 'instance_properties': instances[0], 'image': image, 'instance_type': 'fake_type'} return request_spec self.stubs.Set(self.scheduler, '_create_instances_here', fake_create_instances_here) self.stubs.Set(scheduler_utils, 'build_request_spec', fake_build_request_spec) self.stubs.Set(self.scheduler.compute_task_api, 'build_instances', fake_rpc_build_instances) weight_handler = self.scheduler.weight_handler self.stubs.Set(weight_handler, 'get_weighed_objects', fake_get_weighed_objs) host_sched_kwargs = {'image': 'fake_image', 'instances': self.instances, 'filter_properties': {'instance_type': 'fake_type'}, 'security_groups': 'fake_sec_groups', 'block_device_mapping': 'fake_bdm'} self.msg_runner.build_instances(self.ctxt, self.my_cell_state, host_sched_kwargs) # Our cell was selected. 
self.assertEqual(self.ctxt, call_info['ctxt']) self.assertEqual(self.instance_uuids, call_info['instance_uuids']) self.assertEqual(self.request_spec['instance_properties']['id'], call_info['instance_properties']['id']) self.assertEqual(self.request_spec['instance_type'], call_info['instance_type']) self.assertEqual(self.request_spec['image'], call_info['image']) self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs']) # Weight args are correct expected_filt_props = {'context': self.ctxt, 'scheduler': self.scheduler, 'routing_path': self.my_cell_state.name, 'host_sched_kwargs': host_sched_kwargs, 'request_spec': self.request_spec, 'instance_type': 'fake_type'} self.assertEqual(expected_filt_props, call_info['weight_props']) self.assertEqual([FakeWeightClass1, FakeWeightClass2], [obj.__class__ for obj in call_info['weighers']]) self.assertEqual([self.my_cell_state], call_info['weight_cells']) nova-13.1.4/nova/tests/unit/test_block_device.py0000664000567000056710000007124113064447152023025 0ustar jenkinsjenkins00000000000000# Copyright 2011 Isaku Yamahata # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Block Device utility functions. """ from nova import block_device from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import matchers class BlockDeviceTestCase(test.NoDBTestCase): def setUp(self): super(BlockDeviceTestCase, self).setUp() BDM = block_device.BlockDeviceDict self.new_mapping = [ BDM({'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'volume_size': 1, 'guest_format': 'swap', 'boot_index': -1}), BDM({'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'volume_size': 10, 'delete_on_termination': True, 'boot_index': -1}), BDM({'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'connection_info': "{'fake': 'connection_info'}", 'boot_index': 0}), BDM({'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}), BDM({'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}), ] def test_properties(self): root_device0 = '/dev/sda' root_device1 = '/dev/sdb' mappings = [{'virtual': 'root', 'device': root_device0}] properties0 = {'mappings': mappings} properties1 = {'mappings': mappings, 'root_device_name': root_device1} self.assertIsNone(block_device.properties_root_device_name({})) self.assertEqual(root_device0, block_device.properties_root_device_name(properties0)) self.assertEqual(root_device1, block_device.properties_root_device_name(properties1)) def 
test_ephemeral(self): self.assertFalse(block_device.is_ephemeral('ephemeral')) self.assertTrue(block_device.is_ephemeral('ephemeral0')) self.assertTrue(block_device.is_ephemeral('ephemeral1')) self.assertTrue(block_device.is_ephemeral('ephemeral11')) self.assertFalse(block_device.is_ephemeral('root')) self.assertFalse(block_device.is_ephemeral('swap')) self.assertFalse(block_device.is_ephemeral('/dev/sda1')) self.assertEqual(0, block_device.ephemeral_num('ephemeral0')) self.assertEqual(1, block_device.ephemeral_num('ephemeral1')) self.assertEqual(11, block_device.ephemeral_num('ephemeral11')) self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral')) self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0')) self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1')) self.assertTrue(block_device.is_swap_or_ephemeral('swap')) self.assertFalse(block_device.is_swap_or_ephemeral('root')) self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1')) def test_mappings_prepend_dev(self): mapping = [ {'virtual': 'ami', 'device': '/dev/sda'}, {'virtual': 'root', 'device': 'sda'}, {'virtual': 'ephemeral0', 'device': 'sdb'}, {'virtual': 'swap', 'device': 'sdc'}, {'virtual': 'ephemeral1', 'device': 'sdd'}, {'virtual': 'ephemeral2', 'device': 'sde'}] expected = [ {'virtual': 'ami', 'device': '/dev/sda'}, {'virtual': 'root', 'device': 'sda'}, {'virtual': 'ephemeral0', 'device': '/dev/sdb'}, {'virtual': 'swap', 'device': '/dev/sdc'}, {'virtual': 'ephemeral1', 'device': '/dev/sdd'}, {'virtual': 'ephemeral2', 'device': '/dev/sde'}] prepended = block_device.mappings_prepend_dev(mapping) self.assertEqual(expected.sort(), prepended.sort()) def test_strip_dev(self): self.assertEqual('sda', block_device.strip_dev('/dev/sda')) self.assertEqual('sda', block_device.strip_dev('sda')) self.assertIsNone(block_device.strip_dev(None)) def test_strip_prefix(self): self.assertEqual('a', block_device.strip_prefix('/dev/sda')) self.assertEqual('a', block_device.strip_prefix('a')) self.assertEqual('a', block_device.strip_prefix('xvda')) self.assertEqual('a', block_device.strip_prefix('vda')) self.assertEqual('a', block_device.strip_prefix('hda')) self.assertIsNone(block_device.strip_prefix(None)) def test_get_device_letter(self): self.assertEqual('', block_device.get_device_letter('')) self.assertEqual('a', block_device.get_device_letter('/dev/sda1')) self.assertEqual('b', block_device.get_device_letter('/dev/xvdb')) self.assertEqual('d', block_device.get_device_letter('/dev/d')) self.assertEqual('a', block_device.get_device_letter('a')) self.assertEqual('b', block_device.get_device_letter('sdb2')) self.assertEqual('c', block_device.get_device_letter('vdc')) self.assertEqual('c', block_device.get_device_letter('hdc')) self.assertIsNone(block_device.get_device_letter(None)) def test_volume_in_mapping(self): swap = {'device_name': '/dev/sdb', 'swap_size': 1} ephemerals = [{'num': 0, 'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1', 'size': 1}, {'num': 2, 'virtual_name': 'ephemeral2', 'device_name': '/dev/sdd', 'size': 1}] block_device_mapping = [{'mount_device': '/dev/sde', 'device_path': 'fake_device'}, {'mount_device': '/dev/sdf', 'device_path': 'fake_device'}] block_device_info = { 'root_device_name': '/dev/sda', 'swap': swap, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} def _assert_volume_in_mapping(device_name, true_or_false): in_mapping = block_device.volume_in_mapping( device_name, block_device_info) self.assertEqual(true_or_false, in_mapping) 
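        # Given the block_device_info built above: sdb (swap), sdc1/sdd
        # (ephemerals) and sde/sdf (block device mapping) should report as in
        # the mapping; the root device sda and unknown devices should not.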
_assert_volume_in_mapping('sda', False) _assert_volume_in_mapping('sdb', True) _assert_volume_in_mapping('sdc1', True) _assert_volume_in_mapping('sdd', True) _assert_volume_in_mapping('sde', True) _assert_volume_in_mapping('sdf', True) _assert_volume_in_mapping('sdg', False) _assert_volume_in_mapping('sdh1', False) def test_get_root_bdm(self): root_bdm = {'device_name': 'vda', 'boot_index': 0} bdms = [root_bdm, {'device_name': 'vdb', 'boot_index': 1}, {'device_name': 'vdc', 'boot_index': -1}, {'device_name': 'vdd'}] self.assertEqual(root_bdm, block_device.get_root_bdm(bdms)) self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]])) self.assertIsNone(block_device.get_root_bdm(bdms[1:])) self.assertIsNone(block_device.get_root_bdm(bdms[2:])) self.assertIsNone(block_device.get_root_bdm(bdms[3:])) self.assertIsNone(block_device.get_root_bdm([])) def test_get_bdm_ephemeral_disk_size(self): size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping) self.assertEqual(10, size) def test_get_bdm_swap_list(self): swap_list = block_device.get_bdm_swap_list(self.new_mapping) self.assertEqual(1, len(swap_list)) self.assertEqual(1, swap_list[0].get('id')) def test_get_bdm_local_disk_num(self): size = block_device.get_bdm_local_disk_num(self.new_mapping) self.assertEqual(2, size) def test_new_format_is_swap(self): expected_results = [True, False, False, False, False] for expected, bdm in zip(expected_results, self.new_mapping): res = block_device.new_format_is_swap(bdm) self.assertEqual(expected, res) def test_new_format_is_ephemeral(self): expected_results = [False, True, False, False, False] for expected, bdm in zip(expected_results, self.new_mapping): res = block_device.new_format_is_ephemeral(bdm) self.assertEqual(expected, res) def test_validate_device_name(self): for value in [' ', 10, None, 'a' * 260]: self.assertRaises(exception.InvalidBDMFormat, block_device.validate_device_name, value) def test_validate_and_default_volume_size(self): bdm = {} for value in [-1, 'a', 2.5]: bdm['volume_size'] = value self.assertRaises(exception.InvalidBDMFormat, block_device.validate_and_default_volume_size, bdm) def test_get_bdms_to_connect(self): root_bdm = {'device_name': 'vda', 'boot_index': 0} bdms = [root_bdm, {'device_name': 'vdb', 'boot_index': 1}, {'device_name': 'vdc', 'boot_index': -1}, {'device_name': 'vde', 'boot_index': None}, {'device_name': 'vdd'}] self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms, exclude_root_mapping=True)) self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms)) class TestBlockDeviceDict(test.NoDBTestCase): def setUp(self): super(TestBlockDeviceDict, self).setUp() BDM = block_device.BlockDeviceDict self.api_mapping = [ {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'boot_index': -1}, {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'boot_index': -1}, {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'uuid': 'fake-volume-id-1', 'boot_index': 0}, {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'uuid': 'fake-snapshot-id-1', 'boot_index': -1}, {'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}, ] self.new_mapping = [ 
BDM({'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'boot_index': -1}), BDM({'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'boot_index': -1}), BDM({'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'connection_info': "{'fake': 'connection_info'}", 'boot_index': 0}), BDM({'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}), BDM({'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}), ] self.legacy_mapping = [ {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'delete_on_termination': True, 'virtual_name': 'swap'}, {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'delete_on_termination': True, 'virtual_name': 'ephemeral0'}, {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'volume_id': 'fake-volume-id-1', 'connection_info': "{'fake': 'connection_info'}"}, {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'connection_info': "{'fake': 'connection_info'}", 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2'}, {'id': 5, 'instance_uuid': 'fake-instance', 'no_device': True, 'device_name': '/dev/vdc'}, ] self.new_mapping_source_image = [ BDM({'id': 6, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda3', 'source_type': 'image', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'volume_id': 'fake-volume-id-3', 'boot_index': -1}), BDM({'id': 7, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda4', 'source_type': 'image', 'destination_type': 'local', 'connection_info': "{'fake': 'connection_info'}", 'image_id': 'fake-image-id-2', 'boot_index': -1}), ] self.legacy_mapping_source_image = [ {'id': 6, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda3', 'connection_info': "{'fake': 'connection_info'}", 'volume_id': 'fake-volume-id-3'}, ] def test_init(self): def fake_validate(obj, dct): pass self.stub_out('nova.block_device.BlockDeviceDict._fields', set(['field1', 'field2'])) self.stub_out('nova.block_device.BlockDeviceDict._db_only_fields', set(['db_field1', 'db_field2'])) self.stub_out('nova.block_device.BlockDeviceDict._validate', fake_validate) # Make sure db fields are not picked up if they are not # in the original dict dev_dict = block_device.BlockDeviceDict({'field1': 'foo', 'field2': 'bar', 'db_field1': 'baz'}) self.assertIn('field1', dev_dict) self.assertIn('field2', dev_dict) self.assertIn('db_field1', dev_dict) self.assertNotIn('db_field2', dev_dict) # Make sure all expected fields are defaulted dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}) self.assertIn('field1', dev_dict) self.assertIn('field2', dev_dict) self.assertIsNone(dev_dict['field2']) self.assertNotIn('db_field1', dev_dict) self.assertNotIn('db_field2', dev_dict) # Unless they are not meant to be dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}, do_not_default=set(['field2'])) self.assertIn('field1', dev_dict) self.assertNotIn('field2', dev_dict) 
self.assertNotIn('db_field1', dev_dict) self.assertNotIn('db_field2', dev_dict) # Passing kwargs to constructor works dev_dict = block_device.BlockDeviceDict(field1='foo') self.assertIn('field1', dev_dict) self.assertIn('field2', dev_dict) self.assertIsNone(dev_dict['field2']) dev_dict = block_device.BlockDeviceDict( {'field1': 'foo'}, field2='bar') self.assertEqual('foo', dev_dict['field1']) self.assertEqual('bar', dev_dict['field2']) def test_init_prepend_dev_to_device_name(self): bdm = {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': 'vda', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'boot_index': 0} bdm_dict = block_device.BlockDeviceDict(bdm) self.assertEqual('/dev/vda', bdm_dict['device_name']) bdm['device_name'] = '/dev/vdb' bdm_dict = block_device.BlockDeviceDict(bdm) self.assertEqual('/dev/vdb', bdm_dict['device_name']) bdm['device_name'] = None bdm_dict = block_device.BlockDeviceDict(bdm) self.assertIsNone(bdm_dict['device_name']) def test_init_boolify_delete_on_termination(self): # Make sure that when delete_on_termination is not passed it's # still set to False and not None bdm = {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': 'vda', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'boot_index': 0} bdm_dict = block_device.BlockDeviceDict(bdm) self.assertFalse(bdm_dict['delete_on_termination']) def test_validate(self): self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, {'bogus_field': 'lame_val'}) lame_bdm = dict(self.new_mapping[2]) del lame_bdm['source_type'] self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_bdm) lame_bdm['no_device'] = True block_device.BlockDeviceDict(lame_bdm) lame_dev_bdm = dict(self.new_mapping[2]) lame_dev_bdm['device_name'] = "not a valid name" self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_dev_bdm) lame_dev_bdm['device_name'] = "" self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_dev_bdm) cool_volume_size_bdm = dict(self.new_mapping[2]) cool_volume_size_bdm['volume_size'] = '42' cool_volume_size_bdm = block_device.BlockDeviceDict( cool_volume_size_bdm) self.assertEqual(42, cool_volume_size_bdm['volume_size']) lame_volume_size_bdm = dict(self.new_mapping[2]) lame_volume_size_bdm['volume_size'] = 'some_non_int_string' self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, lame_volume_size_bdm) truthy_bdm = dict(self.new_mapping[2]) truthy_bdm['delete_on_termination'] = '1' truthy_bdm = block_device.BlockDeviceDict(truthy_bdm) self.assertTrue(truthy_bdm['delete_on_termination']) verbose_bdm = dict(self.new_mapping[2]) verbose_bdm['boot_index'] = 'first' self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, verbose_bdm) def test_from_legacy(self): for legacy, new in zip(self.legacy_mapping, self.new_mapping): self.assertThat( block_device.BlockDeviceDict.from_legacy(legacy), matchers.IsSubDictOf(new)) def test_from_legacy_mapping(self): def _get_image_bdms(bdms): return [bdm for bdm in bdms if bdm['source_type'] == 'image'] def _get_bootable_bdms(bdms): return [bdm for bdm in bdms if bdm['boot_index'] >= 0] new_no_img = block_device.from_legacy_mapping(self.legacy_mapping) self.assertEqual(0, len(_get_image_bdms(new_no_img))) for new, expected in zip(new_no_img, self.new_mapping): self.assertThat(new, matchers.IsSubDictOf(expected)) new_with_img = block_device.from_legacy_mapping( 
self.legacy_mapping, 'fake_image_ref') image_bdms = _get_image_bdms(new_with_img) boot_bdms = _get_bootable_bdms(new_with_img) self.assertEqual(1, len(image_bdms)) self.assertEqual(1, len(boot_bdms)) self.assertEqual(0, image_bdms[0]['boot_index']) self.assertEqual('image', boot_bdms[0]['source_type']) new_with_img_and_root = block_device.from_legacy_mapping( self.legacy_mapping, 'fake_image_ref', 'sda1') image_bdms = _get_image_bdms(new_with_img_and_root) boot_bdms = _get_bootable_bdms(new_with_img_and_root) self.assertEqual(0, len(image_bdms)) self.assertEqual(1, len(boot_bdms)) self.assertEqual(0, boot_bdms[0]['boot_index']) self.assertEqual('volume', boot_bdms[0]['source_type']) new_no_root = block_device.from_legacy_mapping( self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True) self.assertEqual(0, len(_get_image_bdms(new_no_root))) self.assertEqual(0, len(_get_bootable_bdms(new_no_root))) def test_from_api(self): for api, new in zip(self.api_mapping, self.new_mapping): new['connection_info'] = None if new['snapshot_id']: new['volume_id'] = None self.assertThat( block_device.BlockDeviceDict.from_api(api, False), matchers.IsSubDictOf(new)) def test_from_api_invalid_blank_id(self): api_dict = {'id': 1, 'source_type': 'blank', 'destination_type': 'volume', 'uuid': 'fake-volume-id-1', 'delete_on_termination': True, 'boot_index': -1} self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict.from_api, api_dict, False) def test_from_api_invalid_source_to_local_mapping(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'uuid': 'fake-volume-id-1'} self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict.from_api, api_dict, False) def test_from_api_valid_source_to_local_mapping(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'volume_id': 'fake-volume-id-1', 'uuid': 1, 'boot_index': 0} retexp = block_device.BlockDeviceDict( {'id': 1, 'source_type': 'image', 'image_id': 1, 'destination_type': 'local', 'volume_id': 'fake-volume-id-1', 'boot_index': 0}) self.assertEqual(retexp, block_device.BlockDeviceDict.from_api(api_dict, True)) def test_from_api_invalid_source_to_local_mapping_with_string_bi(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'uuid': 'fake-volume-id-1', 'boot_index': 'aaaa0'} self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict.from_api, api_dict, False) def test_from_api_valid_source_to_local_mapping_with_string_bi(self): api_dict = {'id': 1, 'source_type': 'image', 'destination_type': 'local', 'volume_id': 'fake-volume-id-1', 'uuid': 1, 'boot_index': '0'} retexp = block_device.BlockDeviceDict( {'id': 1, 'source_type': 'image', 'image_id': 1, 'destination_type': 'local', 'volume_id': 'fake-volume-id-1', 'boot_index': 0}) self.assertEqual(retexp, block_device.BlockDeviceDict.from_api(api_dict, True)) def test_legacy(self): for legacy, new in zip(self.legacy_mapping, self.new_mapping): self.assertThat( legacy, matchers.IsSubDictOf(new.legacy())) def test_legacy_mapping(self): got_legacy = block_device.legacy_mapping(self.new_mapping) for legacy, expected in zip(got_legacy, self.legacy_mapping): self.assertThat(expected, matchers.IsSubDictOf(legacy)) def test_legacy_source_image(self): for legacy, new in zip(self.legacy_mapping_source_image, self.new_mapping_source_image): if new['destination_type'] == 'volume': self.assertThat(legacy, matchers.IsSubDictOf(new.legacy())) else: self.assertRaises(exception.InvalidBDMForLegacy, 
new.legacy) def test_legacy_mapping_source_image(self): got_legacy = block_device.legacy_mapping(self.new_mapping) for legacy, expected in zip(got_legacy, self.legacy_mapping): self.assertThat(expected, matchers.IsSubDictOf(legacy)) def test_legacy_mapping_from_object_list(self): bdm1 = objects.BlockDeviceMapping() bdm1 = objects.BlockDeviceMapping._from_db_object( None, bdm1, fake_block_device.FakeDbBlockDeviceDict( self.new_mapping[0])) bdm2 = objects.BlockDeviceMapping() bdm2 = objects.BlockDeviceMapping._from_db_object( None, bdm2, fake_block_device.FakeDbBlockDeviceDict( self.new_mapping[1])) bdmlist = objects.BlockDeviceMappingList() bdmlist.objects = [bdm1, bdm2] block_device.legacy_mapping(bdmlist) def test_image_mapping(self): removed_fields = ['id', 'instance_uuid', 'connection_info', 'created_at', 'updated_at', 'deleted_at', 'deleted'] for bdm in self.new_mapping: mapping_bdm = fake_block_device.FakeDbBlockDeviceDict( bdm).get_image_mapping() for fld in removed_fields: self.assertNotIn(fld, mapping_bdm) def _test_snapshot_from_bdm(self, template): snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template) self.assertEqual('new-snapshot-id', snapshot['snapshot_id']) self.assertEqual('snapshot', snapshot['source_type']) self.assertEqual('volume', snapshot['destination_type']) self.assertEqual(template.volume_size, snapshot['volume_size']) self.assertEqual(template.delete_on_termination, snapshot['delete_on_termination']) self.assertEqual(template.device_name, snapshot['device_name']) for key in ['disk_bus', 'device_type', 'boot_index']: self.assertEqual(template[key], snapshot[key]) def test_snapshot_from_bdm(self): for bdm in self.new_mapping: self._test_snapshot_from_bdm(objects.BlockDeviceMapping(**bdm)) def test_snapshot_from_object(self): for bdm in self.new_mapping[:-1]: obj = objects.BlockDeviceMapping() obj = objects.BlockDeviceMapping._from_db_object( None, obj, fake_block_device.FakeDbBlockDeviceDict( bdm)) self._test_snapshot_from_bdm(obj) nova-13.1.4/nova/tests/unit/conductor/0000775000567000056710000000000013064447471021002 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/conductor/tasks/0000775000567000056710000000000013064447471022127 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/conductor/tasks/test_base.py0000664000567000056710000000310413064447140024441 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
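# NOTE: the tests in this module pin down the TaskBase wrapper contract
# from nova.conductor.tasks.base as exercised below: execute() runs the
# subclass's _execute() and, if it raises, calls rollback() before letting
# the exception propagate; on success rollback() is never called.  A
# minimal sketch of that contract, inferred from the assertions in
# TaskBaseTestCase (only the TaskBase/_execute/rollback names come from
# the module under test; the body is illustrative, not the real
# implementation):
#
#     class TaskBase(object):
#         def execute(self):
#             try:
#                 return self._execute()
#             except Exception:
#                 self.rollback()
#                 raise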
import mock from nova.conductor.tasks import base from nova import test class FakeTask(base.TaskBase): def __init__(self, context, instance, fail=False): super(FakeTask, self).__init__(context, instance) self.fail = fail def _execute(self): if self.fail: raise Exception else: pass class TaskBaseTestCase(test.NoDBTestCase): def setUp(self): super(TaskBaseTestCase, self).setUp() self.task = FakeTask(mock.MagicMock(), mock.MagicMock()) @mock.patch.object(FakeTask, 'rollback') def test_wrapper_exception(self, fake_rollback): self.task.fail = True try: self.task.execute() except Exception: pass fake_rollback.assert_called_once_with() @mock.patch.object(FakeTask, 'rollback') def test_wrapper_no_exception(self, fake_rollback): try: self.task.execute() except Exception: pass self.assertFalse(fake_rollback.called) nova-13.1.4/nova/tests/unit/conductor/tasks/__init__.py0000664000567000056710000000000013064447140024217 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/conductor/tasks/test_migrate.py0000664000567000056710000001033013064447152025161 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.compute import rpcapi as compute_rpcapi from nova.conductor.tasks import migrate from nova import objects from nova.objects import base as obj_base from nova.scheduler import client as scheduler_client from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests.unit.conductor.test_conductor import FakeContext from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance class MigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(MigrationTaskTestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = FakeContext(self.user_id, self.project_id) inst = fake_instance.fake_db_instance(image_ref='image_ref') self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), inst, []) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.flavor = fake_flavor.fake_flavor_obj(self.context) self.flavor.extra_specs = {'extra_specs': 'fake'} self.request_spec = {'instance_type': obj_base.obj_to_primitive(self.flavor), 'instance_properties': {}, 'image': 'image'} self.hosts = [dict(host='host1', nodename=None, limits={})] self.filter_properties = {'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['host1', None]]}} self.reservations = [] self.clean_shutdown = True def _generate_task(self): return migrate.MigrationTask(self.context, self.instance, self.flavor, self.filter_properties, self.request_spec, self.reservations, self.clean_shutdown, compute_rpcapi.ComputeAPI(), scheduler_client.SchedulerClient()) @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(objects.RequestSpec, 'from_primitives') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize') @mock.patch.object(objects.Quotas, 
'from_reservations') def test_execute(self, quotas_mock, prep_resize_mock, sel_dest_mock, spec_fp_mock, sig_mock, brs_mock): brs_mock.return_value = self.request_spec fake_spec = objects.RequestSpec() spec_fp_mock.return_value = fake_spec sel_dest_mock.return_value = self.hosts task = self._generate_task() task.execute() quotas_mock.assert_called_once_with(self.context, self.reservations, instance=self.instance) sig_mock.assert_called_once_with(self.context, self.request_spec, self.filter_properties) task.scheduler_client.select_destinations.assert_called_once_with( self.context, fake_spec) prep_resize_mock.assert_called_once_with( self.context, 'image', self.instance, self.flavor, self.hosts[0]['host'], self.reservations, request_spec=self.request_spec, filter_properties=self.filter_properties, node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown) self.assertFalse(quotas_mock.return_value.rollback.called) def test_rollback(self): task = self._generate_task() task.quotas = mock.MagicMock() task.rollback() task.quotas.rollback.assert_called_once_with() nova-13.1.4/nova/tests/unit/conductor/tasks/test_live_migrate.py0000664000567000056710000005637313064447152026221 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import oslo_messaging as messaging from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import vm_states from nova.conductor.tasks import live_migrate from nova import exception from nova import objects from nova.scheduler import client as scheduler_client from nova.scheduler import utils as scheduler_utils from nova import servicegroup from nova import test from nova.tests.unit import fake_instance from nova import utils class LiveMigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(LiveMigrationTaskTestCase, self).setUp() self.context = "context" self.instance_host = "host" self.instance_uuid = "uuid" self.instance_image = "image_ref" db_instance = fake_instance.fake_db_instance( host=self.instance_host, uuid=self.instance_uuid, power_state=power_state.RUNNING, vm_state = vm_states.ACTIVE, memory_mb=512, image_ref=self.instance_image) self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.destination = "destination" self.block_migration = "bm" self.disk_over_commit = "doc" self.migration = objects.Migration() self.fake_spec = objects.RequestSpec() self._generate_task() def _generate_task(self): self.task = live_migrate.LiveMigrationTask(self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), self.fake_spec) def test_execute_with_destination(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_check_requested_destination') self.mox.StubOutWithMock(self.task.compute_rpcapi, 
'live_migration') self.task._check_host_is_up(self.instance_host) self.task._check_requested_destination() self.task.compute_rpcapi.live_migration(self.context, host=self.instance_host, instance=self.instance, dest=self.destination, block_migration=self.block_migration, migration=self.migration, migrate_data=None).AndReturn("bob") self.mox.ReplayAll() self.assertEqual("bob", self.task.execute()) def test_execute_without_destination(self): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_find_destination') self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration') self.task._check_host_is_up(self.instance_host) self.task._find_destination().AndReturn("found_host") self.task.compute_rpcapi.live_migration(self.context, host=self.instance_host, instance=self.instance, dest="found_host", block_migration=self.block_migration, migration=self.migration, migrate_data=None).AndReturn("bob") self.mox.ReplayAll() with mock.patch.object(self.migration, 'save') as mock_save: self.assertEqual("bob", self.task.execute()) self.assertTrue(mock_save.called) self.assertEqual('found_host', self.migration.dest_compute) def test_check_instance_is_active_passes_when_paused(self): self.task.instance['power_state'] = power_state.PAUSED self.task._check_instance_is_active() def test_check_instance_is_active_fails_when_shutdown(self): self.task.instance['power_state'] = power_state.SHUTDOWN self.assertRaises(exception.InstanceInvalidState, self.task._check_instance_is_active) def test_check_instance_host_is_up(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') objects.Service.get_by_compute_host(self.context, "host").AndReturn("service") self.task.servicegroup_api.service_is_up("service").AndReturn(True) self.mox.ReplayAll() self.task._check_host_is_up("host") def test_check_instance_host_is_up_fails_if_not_up(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') objects.Service.get_by_compute_host(self.context, "host").AndReturn("service") self.task.servicegroup_api.service_is_up("service").AndReturn(False) self.mox.ReplayAll() self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") def test_check_instance_host_is_up_fails_if_not_found(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host( self.context, "host").AndRaise(exception.NotFound) self.mox.ReplayAll() self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") def test_check_requested_destination(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') self.mox.StubOutWithMock(self.task, '_get_compute_info') self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') self.mox.StubOutWithMock(self.task.compute_rpcapi, 'check_can_live_migrate_destination') objects.Service.get_by_compute_host( self.context, self.destination).AndReturn("service") self.task.servicegroup_api.service_is_up("service").AndReturn(True) hypervisor_details = objects.ComputeNode( hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0, ) self.task._get_compute_info(self.destination)\ .AndReturn(hypervisor_details) self.task._get_compute_info(self.instance_host)\ 
.AndReturn(hypervisor_details) self.task._get_compute_info(self.destination)\ .AndReturn(hypervisor_details) self.task.compute_rpcapi.check_can_live_migrate_destination( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit).AndReturn( "migrate_data") self.mox.ReplayAll() self.task._check_requested_destination() self.assertEqual("migrate_data", self.task.migrate_data) def test_check_requested_destination_fails_with_same_dest(self): self.task.destination = "same" self.task.source = "same" self.assertRaises(exception.UnableToMigrateToSelf, self.task._check_requested_destination) def test_check_requested_destination_fails_when_destination_is_up(self): self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host') objects.Service.get_by_compute_host( self.context, self.destination).AndRaise(exception.NotFound) self.mox.ReplayAll() self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_requested_destination) def test_check_requested_destination_fails_with_not_enough_memory(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(objects.ComputeNode, 'get_first_node_by_host_for_old_compat') self.task._check_host_is_up(self.destination) objects.ComputeNode.get_first_node_by_host_for_old_compat(self.context, self.destination).AndReturn( objects.ComputeNode(free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9, )) self.mox.ReplayAll() # free_ram is bigger than instance.ram (512) but the allocation ratio # reduces the total available RAM to 410MB (1024 * 0.9 - (1024 - 513)) self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) def test_check_requested_destination_fails_with_hypervisor_diff(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_check_destination_has_enough_memory') self.mox.StubOutWithMock(self.task, '_get_compute_info') self.task._check_host_is_up(self.destination) self.task._check_destination_has_enough_memory() self.task._get_compute_info(self.instance_host).AndReturn({ "hypervisor_type": "b" }) self.task._get_compute_info(self.destination).AndReturn({ "hypervisor_type": "a" }) self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, self.task._check_requested_destination) def test_check_requested_destination_fails_with_hypervisor_too_old(self): self.mox.StubOutWithMock(self.task, '_check_host_is_up') self.mox.StubOutWithMock(self.task, '_check_destination_has_enough_memory') self.mox.StubOutWithMock(self.task, '_get_compute_info') self.task._check_host_is_up(self.destination) self.task._check_destination_has_enough_memory() self.task._get_compute_info(self.instance_host).AndReturn({ "hypervisor_type": "a", "hypervisor_version": 7 }) self.task._get_compute_info(self.destination).AndReturn({ "hypervisor_type": "a", "hypervisor_version": 6 }) self.mox.ReplayAll() self.assertRaises(exception.DestinationHypervisorTooOld, self.task._check_requested_destination) def test_find_destination_works(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(objects.RequestSpec, 'reset_forced_destinations') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( 
self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.fake_spec.reset_forced_destinations() self.task.scheduler_client.select_destinations( self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual("host1", self.task._find_destination()) def test_find_destination_works_with_no_request_spec(self): task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), request_spec=None) another_spec = objects.RequestSpec() self.instance.flavor = objects.Flavor() self.instance.numa_topology = None self.instance.pci_requests = None @mock.patch.object(task, '_call_livem_checks_on_host') @mock.patch.object(task, '_check_compatible_with_source_hypervisor') @mock.patch.object(task.scheduler_client, 'select_destinations') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(utils, 'get_image_from_system_metadata') def do_test(get_image, setup_ig, from_components, select_dest, check_compat, call_livem_checks): get_image.return_value = "image" from_components.return_value = another_spec select_dest.return_value = [{'host': 'host1'}] self.assertEqual("host1", task._find_destination()) get_image.assert_called_once_with(self.instance.system_metadata) fake_props = {'instance_properties': {'uuid': self.instance_uuid}} setup_ig.assert_called_once_with( self.context, fake_props, {'ignore_hosts': [self.instance_host]} ) select_dest.assert_called_once_with(self.context, another_spec) check_compat.assert_called_once_with("host1") call_livem_checks.assert_called_once_with("host1") do_test() def test_find_destination_no_image_works(self): self.instance['image_ref'] = '' self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual("host1", self.task._find_destination()) def _test_find_destination_retry_hypervisor_raises(self, error): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': 
[self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(error) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host2'}]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_with_old_hypervisor(self): self._test_find_destination_retry_hypervisor_raises( exception.DestinationHypervisorTooOld) def test_find_destination_retry_with_invalid_hypervisor_type(self): self._test_find_destination_retry_hypervisor_raises( exception.InvalidHypervisorType) def test_find_destination_retry_with_invalid_livem_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.Invalid) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host2'}]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_with_failed_migration_pre_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.MigrationPreCheckError("reason")) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host2'}]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_exceeds_max(self): self.flags(migrate_max_retries=0) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 
'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn( [{'host': 'host1'}]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(exception.DestinationHypervisorTooOld) self.mox.ReplayAll() with mock.patch.object(self.task.migration, 'save') as save_mock: self.assertRaises(exception.MaxRetriesExceeded, self.task._find_destination) self.assertEqual('failed', self.task.migration.status) save_mock.assert_called_once_with() def test_find_destination_when_runs_out_of_hosts(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndRaise( exception.NoValidHost(reason="")) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.task._find_destination) @mock.patch("nova.utils.get_image_from_system_metadata") @mock.patch("nova.scheduler.utils.build_request_spec") @mock.patch("nova.scheduler.utils.setup_instance_group") @mock.patch("nova.objects.RequestSpec.from_primitives") def test_find_destination_with_remoteError(self, m_from_primitives, m_setup_instance_group, m_build_request_spec, m_get_image_from_system_metadata): m_get_image_from_system_metadata.return_value = {'properties': {}} m_build_request_spec.return_value = {} fake_spec = objects.RequestSpec() m_from_primitives.return_value = fake_spec with mock.patch.object(self.task.scheduler_client, 'select_destinations') as m_select_destinations: error = messaging.RemoteError() m_select_destinations.side_effect = error self.assertRaises(exception.MigrationSchedulerRPCError, self.task._find_destination) def test_call_livem_checks_on_host(self): with mock.patch.object(self.task.compute_rpcapi, 'check_can_live_migrate_destination', side_effect=messaging.MessagingTimeout): self.assertRaises(exception.MigrationPreCheckError, self.task._call_livem_checks_on_host, {}) nova-13.1.4/nova/tests/unit/conductor/__init__.py0000664000567000056710000000000013064447140023072 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/conductor/test_conductor.py0000664000567000056710000023300713064447152024414 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for the conductor service.""" import copy import uuid import mock from mox3 import mox import oslo_messaging as messaging from oslo_utils import timeutils from oslo_versionedobjects import exception as ovo_exc import six from nova.compute import flavors from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import vm_states from nova import conductor from nova.conductor import api as conductor_api from nova.conductor import manager as conductor_manager from nova.conductor import rpcapi as conductor_rpcapi from nova.conductor.tasks import live_migrate from nova.conductor.tasks import migrate from nova import context from nova import db from nova import exception as exc from nova.image import api as image_api from nova import objects from nova.objects import base as obj_base from nova.objects import fields from nova import rpc from nova.scheduler import client as scheduler_client from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests import fixtures from nova.tests.unit import cast_as_call from nova.tests.unit.compute import test_compute from nova.tests.unit import fake_instance from nova.tests.unit import fake_notifier from nova.tests.unit import fake_request_spec from nova.tests.unit import fake_server_actions from nova.tests.unit import fake_utils from nova import utils class FakeContext(context.RequestContext): def elevated(self): """Return a consistent elevated context so we can detect it.""" if not hasattr(self, '_elevated'): self._elevated = super(FakeContext, self).elevated() return self._elevated class _BaseTestCase(object): def setUp(self): super(_BaseTestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = FakeContext(self.user_id, self.project_id) fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) def fake_deserialize_context(serializer, ctxt_dict): self.assertEqual(self.context.user_id, ctxt_dict['user_id']) self.assertEqual(self.context.project_id, ctxt_dict['project_id']) return self.context self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context', fake_deserialize_context) fake_utils.stub_out_utils_spawn_n(self.stubs) class ConductorTestCase(_BaseTestCase, test.TestCase): """Conductor Manager Tests.""" def setUp(self): super(ConductorTestCase, self).setUp() self.conductor = conductor_manager.ConductorManager() self.conductor_manager = self.conductor def _test_object_action(self, is_classmethod, raise_exception): class TestObject(obj_base.NovaObject): def foo(self, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' @classmethod def bar(cls, context, raise_exception=False): if raise_exception: raise Exception('test') else: return 'test' obj_base.NovaObjectRegistry.register(TestObject) obj = TestObject() # NOTE(danms): After a trip over RPC, any tuple will be a list, # so use a list here to make sure we can handle it fake_args = [] if is_classmethod: versions = {'TestObject': '1.0'} result = self.conductor.object_class_action_versions( self.context, TestObject.obj_name(), 'bar', versions, fake_args, {'raise_exception': raise_exception}) else: updates, result = self.conductor.object_action( self.context, obj, 'foo', fake_args, {'raise_exception': raise_exception}) self.assertEqual('test', result) def test_object_action(self): self._test_object_action(False, False) def 
test_object_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, False, True) def test_object_class_action(self): self._test_object_action(True, False) def test_object_class_action_on_raise(self): self.assertRaises(messaging.ExpectedException, self._test_object_action, True, True) def test_object_action_copies_object(self): class TestObject(obj_base.NovaObject): fields = {'dict': fields.DictOfStringsField()} def touch_dict(self): self.dict['foo'] = 'bar' self.obj_reset_changes() obj_base.NovaObjectRegistry.register(TestObject) obj = TestObject() obj.dict = {} obj.obj_reset_changes() updates, result = self.conductor.object_action( self.context, obj, 'touch_dict', tuple(), {}) # NOTE(danms): If conductor did not properly copy the object, then # the new and reference copies of the nested dict object will be # the same, and thus 'dict' will not be reported as changed self.assertIn('dict', updates) self.assertEqual({'foo': 'bar'}, updates['dict']) def test_object_class_action_versions(self): @obj_base.NovaObjectRegistry.register class TestObject(obj_base.NovaObject): VERSION = '1.10' @classmethod def foo(cls, context): return cls() versions = { 'TestObject': '1.2', 'OtherObj': '1.0', } with mock.patch.object(self.conductor_manager, '_object_dispatch') as m: m.return_value = TestObject() m.return_value.obj_to_primitive = mock.MagicMock() self.conductor.object_class_action_versions( self.context, TestObject.obj_name(), 'foo', versions, tuple(), {}) m.return_value.obj_to_primitive.assert_called_once_with( target_version='1.2', version_manifest=versions) def test_object_class_action_versions_old_object(self): # Make sure we return older than requested objects unmodified, # see bug #1596119. @obj_base.NovaObjectRegistry.register class TestObject(obj_base.NovaObject): VERSION = '1.10' @classmethod def foo(cls, context): return cls() versions = { 'TestObject': '1.10', 'OtherObj': '1.0', } with mock.patch.object(self.conductor_manager, '_object_dispatch') as m: m.return_value = TestObject() m.return_value.VERSION = '1.9' m.return_value.obj_to_primitive = mock.MagicMock() obj = self.conductor.object_class_action_versions( self.context, TestObject.obj_name(), 'foo', versions, tuple(), {}) self.assertFalse(m.return_value.obj_to_primitive.called) self.assertEqual('1.9', obj.VERSION) def test_object_class_action_versions_major_version_diff(self): @obj_base.NovaObjectRegistry.register class TestObject(obj_base.NovaObject): VERSION = '2.10' @classmethod def foo(cls, context): return cls() versions = { 'TestObject': '2.10', 'OtherObj': '1.0', } with mock.patch.object(self.conductor_manager, '_object_dispatch') as m: m.return_value = TestObject() m.return_value.VERSION = '1.9' self.assertRaises( ovo_exc.InvalidTargetVersion, self.conductor.object_class_action_versions, self.context, TestObject.obj_name(), 'foo', versions, tuple(), {}) def test_reset(self): with mock.patch.object(objects.Service, 'clear_min_version_cache' ) as mock_clear_cache: self.conductor.reset() mock_clear_cache.assert_called_once_with() def test_provider_fw_rule_get_all(self): result = self.conductor.provider_fw_rule_get_all(self.context) self.assertEqual([], result) class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase): """Conductor RPC API Tests.""" def setUp(self): super(ConductorRPCAPITestCase, self).setUp() self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor_manager = self.conductor_service.manager self.conductor = 
conductor_rpcapi.ConductorAPI() class ConductorAPITestCase(_BaseTestCase, test.TestCase): """Conductor API Tests.""" def setUp(self): super(ConductorAPITestCase, self).setUp() self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor = conductor_api.API() self.conductor_manager = self.conductor_service.manager def test_wait_until_ready(self): timeouts = [] calls = dict(count=0) def fake_ping(context, message, timeout): timeouts.append(timeout) calls['count'] += 1 if calls['count'] < 15: raise messaging.MessagingTimeout("fake") self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping) self.conductor.wait_until_ready(self.context) self.assertEqual(timeouts.count(10), 10) self.assertIn(None, timeouts) @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions') def test_object_backport_redirect(self, mock_ovo): mock_ovo.return_value = mock.sentinel.obj_versions mock_objinst = mock.Mock() with mock.patch.object(self.conductor, 'object_backport_versions') as mock_call: self.conductor.object_backport(mock.sentinel.ctxt, mock_objinst, mock.sentinel.target_version) mock_call.assert_called_once_with(mock.sentinel.ctxt, mock_objinst, mock.sentinel.obj_versions) class ConductorLocalAPITestCase(ConductorAPITestCase): """Conductor LocalAPI Tests.""" def setUp(self): super(ConductorLocalAPITestCase, self).setUp() self.conductor = conductor_api.LocalAPI() self.conductor_manager = self.conductor._manager._target def test_wait_until_ready(self): # Override test in ConductorAPITestCase pass class ConductorImportTest(test.NoDBTestCase): def test_import_conductor_local(self): self.flags(use_local=True, group='conductor') self.assertIsInstance(conductor.API(), conductor_api.LocalAPI) self.assertIsInstance(conductor.ComputeTaskAPI(), conductor_api.LocalComputeTaskAPI) def test_import_conductor_rpc(self): self.flags(use_local=False, group='conductor') self.assertIsInstance(conductor.API(), conductor_api.API) self.assertIsInstance(conductor.ComputeTaskAPI(), conductor_api.ComputeTaskAPI) def test_import_conductor_override_to_local(self): self.flags(use_local=False, group='conductor') self.assertIsInstance(conductor.API(use_local=True), conductor_api.LocalAPI) self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True), conductor_api.LocalComputeTaskAPI) class _BaseTaskTestCase(object): def setUp(self): super(_BaseTaskTestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = FakeContext(self.user_id, self.project_id) fake_server_actions.stub_out_action_events(self.stubs) def fake_deserialize_context(serializer, ctxt_dict): self.assertEqual(self.context.user_id, ctxt_dict['user_id']) self.assertEqual(self.context.project_id, ctxt_dict['project_id']) return self.context self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context', fake_deserialize_context) self.useFixture(fixtures.SpawnIsSynchronousFixture()) def _prepare_rebuild_args(self, update_args=None): # Args that don't get passed in to the method but do get passed to RPC migration = update_args and update_args.pop('migration', None) node = update_args and update_args.pop('node', None) limits = update_args and update_args.pop('limits', None) rebuild_args = {'new_pass': 'admin_password', 'injected_files': 'files_to_inject', 'image_ref': 'image_ref', 'orig_image_ref': 'orig_image_ref', 'orig_sys_metadata': 'orig_sys_meta', 'bdms': {}, 'recreate': False, 'on_shared_storage': False, 'preserve_ephemeral': False, 'host': 'compute-host', 'request_spec': 
None} if update_args: rebuild_args.update(update_args) compute_rebuild_args = copy.deepcopy(rebuild_args) compute_rebuild_args['migration'] = migration compute_rebuild_args['node'] = node compute_rebuild_args['limits'] = limits # Args that are passed in to the method but don't get passed to RPC compute_rebuild_args.pop('request_spec') return rebuild_args, compute_rebuild_args @mock.patch('nova.objects.Migration') def test_live_migrate(self, migobj): inst = fake_instance.fake_db_instance() inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), inst, []) migration = migobj() self.mox.StubOutWithMock(live_migrate.LiveMigrationTask, 'execute') task = self.conductor_manager._build_live_migrate_task( self.context, inst_obj, 'destination', 'block_migration', 'disk_over_commit', migration) task.execute() self.mox.ReplayAll() if isinstance(self.conductor, (conductor_api.ComputeTaskAPI, conductor_api.LocalComputeTaskAPI)): # The API method is actually 'live_migrate_instance'. It gets # converted into 'migrate_server' when doing RPC. self.conductor.live_migrate_instance(self.context, inst_obj, 'destination', 'block_migration', 'disk_over_commit') else: self.conductor.migrate_server(self.context, inst_obj, {'host': 'destination'}, True, False, None, 'block_migration', 'disk_over_commit') self.assertEqual('accepted', migration.status) self.assertEqual('destination', migration.dest_compute) self.assertEqual(inst_obj.host, migration.source_compute) def _test_cold_migrate(self, clean_shutdown=True): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(migrate.MigrationTask, 'execute') inst = fake_instance.fake_db_instance(image_ref='image_ref') inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), inst, []) inst_obj.system_metadata = {'image_hw_disk_bus': 'scsi'} flavor = flavors.get_default_flavor() flavor.extra_specs = {'extra_specs': 'fake'} filter_properties = {'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['host1', None]]}} request_spec = {'instance_type': obj_base.obj_to_primitive(flavor), 'instance_properties': {}} utils.get_image_from_system_metadata( inst_obj.system_metadata).AndReturn('image') scheduler_utils.build_request_spec( self.context, 'image', [mox.IsA(objects.Instance)], instance_type=mox.IsA(objects.Flavor)).AndReturn(request_spec) task = self.conductor_manager._build_cold_migrate_task( self.context, inst_obj, flavor, filter_properties, request_spec, [], clean_shutdown=clean_shutdown) task.execute() self.mox.ReplayAll() scheduler_hint = {'filter_properties': {}} if isinstance(self.conductor, (conductor_api.ComputeTaskAPI, conductor_api.LocalComputeTaskAPI)): # The API method is actually 'resize_instance'. It gets # converted into 'migrate_server' when doing RPC. 
self.conductor.resize_instance( self.context, inst_obj, {}, scheduler_hint, flavor, [], clean_shutdown) else: self.conductor.migrate_server( self.context, inst_obj, scheduler_hint, False, False, flavor, None, None, [], clean_shutdown) def test_cold_migrate(self): self._test_cold_migrate() def test_cold_migrate_forced_shutdown(self): self._test_cold_migrate(clean_shutdown=False) @mock.patch('nova.objects.Instance.refresh') def test_build_instances(self, mock_refresh): instance_type = flavors.get_default_flavor() instances = [objects.Instance(context=self.context, id=i, uuid=uuid.uuid4(), flavor=instance_type) for i in range(2)] instance_type_p = obj_base.obj_to_primitive(instance_type) instance_properties = obj_base.obj_to_primitive(instances[0]) instance_properties['system_metadata'] = flavors.save_flavor_info( {}, instance_type) self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(db, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'build_and_run_instance') spec = {'image': {'fake_data': 'should_pass_silently'}, 'instance_properties': instance_properties, 'instance_type': instance_type_p, 'num_instances': 2} filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} self.conductor_manager._schedule_instances(self.context, spec, filter_properties).AndReturn( [{'host': 'host1', 'nodename': 'node1', 'limits': []}, {'host': 'host2', 'nodename': 'node2', 'limits': []}]) db.block_device_mapping_get_all_by_instance(self.context, instances[0].uuid).AndReturn([]) self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=mox.IgnoreArg(), host='host1', image={'fake_data': 'should_pass_silently'}, request_spec={ 'image': {'fake_data': 'should_pass_silently'}, 'instance_properties': instance_properties, 'instance_type': instance_type_p, 'num_instances': 2}, filter_properties={'retry': {'num_attempts': 1, 'hosts': [['host1', 'node1']]}, 'limits': []}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mox.IgnoreArg(), node='node1', limits=[]) db.block_device_mapping_get_all_by_instance(self.context, instances[1].uuid).AndReturn([]) self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=mox.IgnoreArg(), host='host2', image={'fake_data': 'should_pass_silently'}, request_spec={ 'image': {'fake_data': 'should_pass_silently'}, 'instance_properties': instance_properties, 'instance_type': instance_type_p, 'num_instances': 2}, filter_properties={'limits': [], 'retry': {'num_attempts': 1, 'hosts': [['host2', 'node2']]}}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mox.IgnoreArg(), node='node2', limits=[]) self.mox.ReplayAll() # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances(self.context, instances=instances, image={'fake_data': 'should_pass_silently'}, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') 
@mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(conductor_manager.ComputeTaskManager, '_cleanup_allocated_networks') def test_build_instances_scheduler_failure( self, cleanup_mock, sd_mock, state_mock, sig_mock, bs_mock): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] image = {'fake-data': 'should_pass_silently'} spec = {'fake': 'specs', 'instance_properties': instances[0]} exception = exc.NoValidHost(reason='fake-reason') bs_mock.return_value = spec sd_mock.side_effect = exception updates = {'vm_state': vm_states.ERROR, 'task_state': None} # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances( self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) set_state_calls = [] cleanup_network_calls = [] for instance in instances: set_state_calls.append(mock.call( self.context, instance.uuid, 'compute_task', 'build_instances', updates, exception, spec)) cleanup_network_calls.append(mock.call( self.context, mock.ANY, None)) state_mock.assert_has_calls(set_state_calls) cleanup_mock.assert_has_calls(cleanup_network_calls) def test_build_instances_retry_exceeded(self): instances = [fake_instance.fake_instance_obj(self.context)] image = {'fake-data': 'should_pass_silently'} filter_properties = {'retry': {'num_attempts': 10, 'hosts': []}} updates = {'vm_state': vm_states.ERROR, 'task_state': None} @mock.patch.object(conductor_manager.ComputeTaskManager, '_cleanup_allocated_networks') @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') @mock.patch.object(scheduler_utils, 'populate_retry') def _test(populate_retry, set_vm_state_and_notify, cleanup_mock): # build_instances() is a cast, we need to wait for it to # complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) populate_retry.side_effect = exc.MaxRetriesExceeded( reason="Too many try") self.conductor.build_instances( self.context, instances=instances, image=image, filter_properties=filter_properties, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) populate_retry.assert_called_once_with( filter_properties, instances[0].uuid) set_vm_state_and_notify.assert_called_once_with( self.context, instances[0].uuid, 'compute_task', 'build_instances', updates, mock.ANY, {}) cleanup_mock.assert_called_once_with(self.context, mock.ANY, None) _test() @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') @mock.patch.object(conductor_manager.ComputeTaskManager, '_cleanup_allocated_networks') def test_build_instances_scheduler_group_failure( self, cleanup_mock, state_mock, sig_mock, bs_mock): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] image = {'fake-data': 'should_pass_silently'} spec = {'fake': 'specs', 'instance_properties': instances[0]} bs_mock.return_value = spec exception = exc.UnsupportedPolicyException(reason='fake-reason') sig_mock.side_effect = exception updates = {'vm_state': vm_states.ERROR, 'task_state': None} # build_instances() is a cast, we need 
to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances( context=self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) set_state_calls = [] cleanup_network_calls = [] for instance in instances: set_state_calls.append(mock.call( self.context, instance.uuid, 'build_instances', updates, exception, spec)) cleanup_network_calls.append(mock.call( self.context, mock.ANY, None)) state_mock.assert_has_calls(set_state_calls) cleanup_mock.assert_has_calls(cleanup_network_calls) def test_unshelve_instance_on_host(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'start_instance') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'unshelve_instance') self.conductor_manager.compute_rpcapi.start_instance(self.context, instance) self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) def test_unshelve_offload_instance_on_host_with_request_spec(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' fake_spec = fake_request_spec.fake_spec_obj() # FIXME(sbauza): Modify the fake RequestSpec object to either add a # non-empty SchedulerRetries object or nullify the field fake_spec.retry = None # FIXME(sbauza): Modify the fake RequestSpec object to either add a # non-empty SchedulerLimits object or nullify the field fake_spec.limits = None # FIXME(sbauza): Modify the fake RequestSpec object to either add a # non-empty InstanceGroup object or nullify the field fake_spec.instance_group = None filter_properties = fake_spec.to_legacy_filter_properties_dict() request_spec = fake_spec.to_legacy_request_spec_dict() host = {'host': 'host1', 'nodename': 'node1', 'limits': []} # unshelve_instance() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) @mock.patch.object(self.conductor_manager.compute_rpcapi, 'unshelve_instance') @mock.patch.object(scheduler_utils, 'populate_filter_properties') @mock.patch.object(scheduler_utils, 'populate_retry') @mock.patch.object(self.conductor_manager, '_schedule_instances') @mock.patch.object(objects.RequestSpec, 'to_legacy_request_spec_dict') @mock.patch.object(objects.RequestSpec, 'to_legacy_filter_properties_dict') @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations') def do_test(reset_forced_destinations, to_filtprops, to_reqspec, sched_instances, populate_retry, populate_filter_properties, unshelve_instance): to_filtprops.return_value = filter_properties to_reqspec.return_value = request_spec sched_instances.return_value = [host] self.conductor.unshelve_instance(self.context, instance, fake_spec) reset_forced_destinations.assert_called_once_with() 
sched_instances.assert_called_once_with(self.context, request_spec, filter_properties) # NOTE(sbauza): Since the instance is dehydrated when passing thru # the RPC API, we can only assert mock.ANY for it unshelve_instance.assert_called_once_with( self.context, mock.ANY, host['host'], image=mock.ANY, filter_properties=filter_properties, node=host['nodename'] ) do_test() def test_unshelve_offloaded_instance_glance_image_not_found(self): shelved_image_id = "image_not_found" instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get') e = exc.ImageNotFound(image_id=shelved_image_id) self.conductor_manager.image_api.get( self.context, shelved_image_id, show_deleted=False).AndRaise(e) self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_host'] = 'fake-mini' system_metadata['shelved_image_id'] = shelved_image_id self.assertRaises( exc.UnshelveException, self.conductor_manager.unshelve_instance, self.context, instance) self.assertEqual(instance.vm_state, vm_states.ERROR) def test_unshelve_offloaded_instance_image_id_is_none(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING # 'shelved_image_id' is None for volumebacked instance instance.system_metadata['shelved_image_id'] = None with test.nested( mock.patch.object(self.conductor_manager, '_schedule_instances'), mock.patch.object(self.conductor_manager.compute_rpcapi, 'unshelve_instance'), ) as (schedule_mock, unshelve_mock): schedule_mock.return_value = [{'host': 'fake_host', 'nodename': 'fake_node', 'limits': {}}] self.conductor_manager.unshelve_instance(self.context, instance) self.assertEqual(1, unshelve_mock.call_count) def test_unshelve_instance_schedule_and_rebuild(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.save() filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} system_metadata = instance.system_metadata self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get') self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'unshelve_instance') self.conductor_manager.image_api.get(self.context, 'fake_image_id', show_deleted=False).AndReturn('fake_image') scheduler_utils.build_request_spec(self.context, 'fake_image', mox.IgnoreArg()).AndReturn('req_spec') self.conductor_manager._schedule_instances(self.context, 'req_spec', filter_properties).AndReturn( [{'host': 'fake_host', 'nodename': 'fake_node', 'limits': {}}]) self.conductor_manager.compute_rpcapi.unshelve_instance(self.context, instance, 'fake_host', image='fake_image', filter_properties={'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['fake_host', 'fake_node']]}}, node='fake_node') self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) def test_unshelve_instance_schedule_and_rebuild_novalid_host(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.save() system_metadata = instance.system_metadata 
def fake_schedule_instances(context, image, filter_properties, *instances): raise exc.NoValidHost(reason='') with test.nested( mock.patch.object(self.conductor_manager.image_api, 'get', return_value='fake_image'), mock.patch.object(self.conductor_manager, '_schedule_instances', fake_schedule_instances) ) as (_get_image, _schedule_instances): system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) _get_image.assert_has_calls([mock.call(self.context, system_metadata['shelved_image_id'], show_deleted=False)]) self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) @mock.patch.object(conductor_manager.ComputeTaskManager, '_schedule_instances', side_effect=messaging.MessagingTimeout()) @mock.patch.object(image_api.API, 'get', return_value='fake_image') def test_unshelve_instance_schedule_and_rebuild_messaging_exception( self, mock_get_image, mock_schedule_instances): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = task_states.UNSHELVING instance.save() system_metadata = instance.system_metadata system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_image_id'] = 'fake_image_id' system_metadata['shelved_host'] = 'fake-mini' self.assertRaises(messaging.MessagingTimeout, self.conductor_manager.unshelve_instance, self.context, instance) mock_get_image.assert_has_calls([mock.call(self.context, system_metadata['shelved_image_id'], show_deleted=False)]) self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) self.assertIsNone(instance.task_state) def test_unshelve_instance_schedule_and_rebuild_volume_backed(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED instance.save() filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} system_metadata = instance.system_metadata self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'unshelve_instance') scheduler_utils.build_request_spec(self.context, None, mox.IgnoreArg()).AndReturn('req_spec') self.conductor_manager._schedule_instances(self.context, 'req_spec', filter_properties).AndReturn( [{'host': 'fake_host', 'nodename': 'fake_node', 'limits': {}}]) self.conductor_manager.compute_rpcapi.unshelve_instance(self.context, instance, 'fake_host', image=None, filter_properties={'limits': {}, 'retry': {'num_attempts': 1, 'hosts': [['fake_host', 'fake_node']]}}, node='fake_node') self.mox.ReplayAll() system_metadata['shelved_at'] = timeutils.utcnow() system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) def test_rebuild_instance(self): inst_obj = self._create_fake_instance_obj() rebuild_args, compute_args = self._prepare_rebuild_args( {'host': inst_obj.host}) with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations') ) as (rebuild_mock, select_dest_mock): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) self.assertFalse(select_dest_mock.called) rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) def test_rebuild_instance_with_scheduler(self): inst_obj = 
self._create_fake_instance_obj() inst_obj.host = 'noselect' expected_host = 'thebesthost' expected_node = 'thebestnode' expected_limits = 'fake-limits' rebuild_args, compute_args = self._prepare_rebuild_args( {'host': None, 'node': expected_node, 'limits': expected_limits}) request_spec = {} filter_properties = {'ignore_hosts': [(inst_obj.host)]} fake_spec = objects.RequestSpec() with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(scheduler_utils, 'setup_instance_group', return_value=False), mock.patch.object(objects.RequestSpec, 'from_primitives', return_value=fake_spec), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', return_value=[{'host': expected_host, 'nodename': expected_node, 'limits': expected_limits}]), mock.patch('nova.scheduler.utils.build_request_spec', return_value=request_spec) ) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, bs_mock): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) fp_mock.assert_called_once_with(self.context, request_spec, filter_properties) select_dest_mock.assert_called_once_with(self.context, fake_spec) compute_args['host'] = expected_host rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) self.assertEqual('compute.instance.rebuild.scheduled', fake_notifier.NOTIFICATIONS[0].event_type) def test_rebuild_instance_with_scheduler_no_host(self): inst_obj = self._create_fake_instance_obj() inst_obj.host = 'noselect' rebuild_args, _ = self._prepare_rebuild_args({'host': None}) request_spec = {} filter_properties = {'ignore_hosts': [(inst_obj.host)]} fake_spec = objects.RequestSpec() with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(scheduler_utils, 'setup_instance_group', return_value=False), mock.patch.object(objects.RequestSpec, 'from_primitives', return_value=fake_spec), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', side_effect=exc.NoValidHost(reason='')), mock.patch('nova.scheduler.utils.build_request_spec', return_value=request_spec) ) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, bs_mock): self.assertRaises(exc.NoValidHost, self.conductor_manager.rebuild_instance, context=self.context, instance=inst_obj, **rebuild_args) fp_mock.assert_called_once_with(self.context, request_spec, filter_properties) select_dest_mock.assert_called_once_with(self.context, fake_spec) self.assertFalse(rebuild_mock.called) @mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI, 'rebuild_instance') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(conductor_manager.scheduler_client.SchedulerClient, 'select_destinations') @mock.patch('nova.scheduler.utils.build_request_spec') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') def test_rebuild_instance_with_scheduler_group_failure(self, state_mock, bs_mock, select_dest_mock, sig_mock, rebuild_mock): inst_obj = self._create_fake_instance_obj() rebuild_args, _ = self._prepare_rebuild_args({'host': None}) request_spec = {} bs_mock.return_value = request_spec exception = exc.UnsupportedPolicyException(reason='') sig_mock.side_effect = exception # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.assertRaises(exc.UnsupportedPolicyException, self.conductor.rebuild_instance, self.context, inst_obj, 
**rebuild_args) updates = {'vm_state': vm_states.ACTIVE, 'task_state': None} state_mock.assert_called_once_with(self.context, inst_obj.uuid, 'rebuild_server', updates, exception, request_spec) self.assertFalse(select_dest_mock.called) self.assertFalse(rebuild_mock.called) def test_rebuild_instance_evacuate_migration_record(self): inst_obj = self._create_fake_instance_obj() migration = objects.Migration(context=self.context, source_compute=inst_obj.host, source_node=inst_obj.node, instance_uuid=inst_obj.uuid, status='accepted', migration_type='evacuation') rebuild_args, compute_args = self._prepare_rebuild_args( {'host': inst_obj.host, 'migration': migration}) with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations'), mock.patch.object(objects.Migration, 'get_by_instance_and_status', return_value=migration) ) as (rebuild_mock, select_dest_mock, get_migration_mock): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) self.assertFalse(select_dest_mock.called) rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) def test_rebuild_instance_with_request_spec(self): inst_obj = self._create_fake_instance_obj() inst_obj.host = 'noselect' expected_host = 'thebesthost' expected_node = 'thebestnode' expected_limits = 'fake-limits' request_spec = {} filter_properties = {'ignore_hosts': [(inst_obj.host)]} fake_spec = objects.RequestSpec(ignore_hosts=[]) augmented_spec = objects.RequestSpec(ignore_hosts=[inst_obj.host]) rebuild_args, compute_args = self._prepare_rebuild_args( {'host': None, 'node': expected_node, 'limits': expected_limits, 'request_spec': fake_spec}) with test.nested( mock.patch.object(self.conductor_manager.compute_rpcapi, 'rebuild_instance'), mock.patch.object(scheduler_utils, 'setup_instance_group', return_value=False), mock.patch.object(objects.RequestSpec, 'from_primitives', return_value=augmented_spec), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', return_value=[{'host': expected_host, 'nodename': expected_node, 'limits': expected_limits}]), mock.patch.object(fake_spec, 'reset_forced_destinations'), mock.patch.object(fake_spec, 'to_legacy_request_spec_dict', return_value=request_spec), mock.patch.object(fake_spec, 'to_legacy_filter_properties_dict', return_value=filter_properties), ) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, reset_fd, to_reqspec, to_filtprops): self.conductor_manager.rebuild_instance(context=self.context, instance=inst_obj, **rebuild_args) reset_fd.assert_called_once_with() to_reqspec.assert_called_once_with() to_filtprops.assert_called_once_with() fp_mock.assert_called_once_with(self.context, request_spec, filter_properties) select_dest_mock.assert_called_once_with(self.context, augmented_spec) compute_args['host'] = expected_host rebuild_mock.assert_called_once_with(self.context, instance=inst_obj, **compute_args) self.assertEqual('compute.instance.rebuild.scheduled', fake_notifier.NOTIFICATIONS[0].event_type) class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase): """ComputeTaskManager Tests.""" def setUp(self): super(ConductorTaskTestCase, self).setUp() self.conductor = conductor_manager.ComputeTaskManager() self.conductor_manager = self.conductor def test_reset(self): with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc: old_rpcapi = self.conductor_manager.compute_rpcapi 
self.conductor_manager.reset() mock_rpc.assert_called_once_with() self.assertNotEqual(old_rpcapi, self.conductor_manager.compute_rpcapi) def test_migrate_server_fails_with_rebuild(self): self.assertRaises(NotImplementedError, self.conductor.migrate_server, self.context, None, None, True, True, None, None, None) def test_migrate_server_fails_with_flavor(self): flavor = flavors.get_flavor_by_name('m1.tiny') self.assertRaises(NotImplementedError, self.conductor.migrate_server, self.context, None, None, True, False, flavor, None, None) def _build_request_spec(self, instance): return { 'instance_properties': { 'uuid': instance['uuid'], }, } @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') @mock.patch.object(live_migrate.LiveMigrationTask, 'execute') def _test_migrate_server_deals_with_expected_exceptions(self, ex, mock_execute, mock_set): instance = fake_instance.fake_db_instance(uuid='uuid', vm_state=vm_states.ACTIVE) inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), instance, []) mock_execute.side_effect = ex self.conductor = utils.ExceptionHelper(self.conductor) self.assertRaises(type(ex), self.conductor.migrate_server, self.context, inst_obj, {'host': 'destination'}, True, False, None, 'block_migration', 'disk_over_commit') mock_set.assert_called_once_with(self.context, inst_obj.uuid, 'compute_task', 'migrate_server', {'vm_state': vm_states.ACTIVE, 'task_state': None, 'expected_task_state': task_states.MIGRATING}, ex, self._build_request_spec(inst_obj)) def test_migrate_server_deals_with_invalidcpuinfo_exception(self): instance = fake_instance.fake_db_instance(uuid='uuid', vm_state=vm_states.ACTIVE) inst_obj = objects.Instance._from_db_object( self.context, objects.Instance(), instance, []) self.mox.StubOutWithMock(live_migrate.LiveMigrationTask, 'execute') self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify') ex = exc.InvalidCPUInfo(reason="invalid cpu info.") task = self.conductor._build_live_migrate_task( self.context, inst_obj, 'destination', 'block_migration', 'disk_over_commit', mox.IsA(objects.Migration)) task.execute().AndRaise(ex) scheduler_utils.set_vm_state_and_notify(self.context, inst_obj.uuid, 'compute_task', 'migrate_server', {'vm_state': vm_states.ACTIVE, 'task_state': None, 'expected_task_state': task_states.MIGRATING}, ex, self._build_request_spec(inst_obj)) self.mox.ReplayAll() self.conductor = utils.ExceptionHelper(self.conductor) self.assertRaises(exc.InvalidCPUInfo, self.conductor.migrate_server, self.context, inst_obj, {'host': 'destination'}, True, False, None, 'block_migration', 'disk_over_commit') def test_migrate_server_deals_with_expected_exception(self): exs = [exc.InstanceInvalidState(instance_uuid="fake", attr='', state='', method=''), exc.DestinationHypervisorTooOld(), exc.HypervisorUnavailable(host='dummy'), exc.LiveMigrationWithOldNovaNotSafe(server='dummy'), exc.LiveMigrationWithOldNovaNotSupported(), exc.MigrationPreCheckError(reason='dummy'), exc.InvalidSharedStorage(path='dummy', reason='dummy'), exc.NoValidHost(reason='dummy'), exc.ComputeServiceUnavailable(host='dummy'), exc.InvalidHypervisorType(), exc.InvalidCPUInfo(reason='dummy'), exc.UnableToMigrateToSelf(instance_id='dummy', host='dummy'), exc.InvalidLocalStorage(path='dummy', reason='dummy'), exc.MigrationSchedulerRPCError(reason='dummy')] for ex in exs: self._test_migrate_server_deals_with_expected_exceptions(ex) @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') @mock.patch.object(live_migrate.LiveMigrationTask, 'execute') 
    def test_migrate_server_deals_with_unexpected_exceptions(
            self, mock_live_migrate, mock_set_state):
        expected_ex = IOError('fake error')
        mock_live_migrate.side_effect = expected_ex
        instance = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        ex = self.assertRaises(exc.MigrationError,
                               self.conductor.migrate_server,
                               self.context, inst_obj,
                               {'host': 'destination'}, True, False, None,
                               'block_migration', 'disk_over_commit')
        request_spec = {'instance_properties': {
            'uuid': instance['uuid'], },
        }
        mock_set_state.assert_called_once_with(
            self.context,
            instance['uuid'],
            'compute_task', 'migrate_server',
            dict(vm_state=vm_states.ERROR,
                 task_state=inst_obj.task_state,
                 expected_task_state=task_states.MIGRATING,),
            expected_ex, request_spec)
        self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))

    def test_set_vm_state_and_notify(self):
        self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify')
        scheduler_utils.set_vm_state_and_notify(
            self.context, 1, 'compute_task', 'method', 'updates',
            'ex', 'request_spec')

        self.mox.ReplayAll()

        self.conductor._set_vm_state_and_notify(
            self.context, 1, 'method', 'updates', 'ex', 'request_spec')

    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(objects.RequestSpec, 'from_primitives')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    @mock.patch.object(objects.Quotas, 'from_reservations')
    @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    @mock.patch.object(migrate.MigrationTask, 'rollback')
    def test_cold_migrate_no_valid_host_back_in_active_state(
            self, rollback_mock, notify_mock, select_dest_mock, quotas_mock,
            metadata_mock, spec_fp_mock, sig_mock, brs_mock):
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst_obj = objects.Instance(
            image_ref='fake-image_ref',
            instance_type_id=flavor['id'],
            vm_state=vm_states.ACTIVE,
            system_metadata={},
            uuid='fake',
            user_id='fake')
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        fake_spec = objects.RequestSpec()

        metadata_mock.return_value = image
        brs_mock.return_value = request_spec
        spec_fp_mock.return_value = fake_spec
        exc_info = exc.NoValidHost(reason="")
        select_dest_mock.side_effect = exc_info
        updates = {'vm_state': vm_states.ACTIVE, 'task_state': None}

        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate,
                          self.context, inst_obj,
                          flavor, filter_props, [resvs],
                          clean_shutdown=True)

        metadata_mock.assert_called_with({})
        brs_mock.assert_called_once_with(self.context, image, [inst_obj],
                                         instance_type=flavor)
        quotas_mock.assert_called_once_with(self.context, [resvs],
                                            instance=inst_obj)
        sig_mock.assert_called_once_with(self.context, request_spec,
                                         filter_props)
        notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                            'migrate_server', updates,
                                            exc_info, request_spec)
        rollback_mock.assert_called_once_with()

    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(objects.RequestSpec, 'from_primitives')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    @mock.patch.object(objects.Quotas, 'from_reservations')
    @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
@mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') @mock.patch.object(migrate.MigrationTask, 'rollback') def test_cold_migrate_no_valid_host_back_in_stopped_state( self, rollback_mock, notify_mock, select_dest_mock, quotas_mock, metadata_mock, spec_fp_mock, sig_mock, brs_mock): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') image = 'fake-image' request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict(), image=image) filter_props = dict(context=None) resvs = 'fake-resvs' fake_spec = objects.RequestSpec() metadata_mock.return_value = image brs_mock.return_value = request_spec spec_fp_mock.return_value = fake_spec exc_info = exc.NoValidHost(reason="") select_dest_mock.side_effect = exc_info updates = {'vm_state': vm_states.STOPPED, 'task_state': None} self.assertRaises(exc.NoValidHost, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) metadata_mock.assert_called_with({}) brs_mock.assert_called_once_with(self.context, image, [inst_obj], instance_type=flavor) quotas_mock.assert_called_once_with(self.context, [resvs], instance=inst_obj) sig_mock.assert_called_once_with(self.context, request_spec, filter_props) notify_mock.assert_called_once_with(self.context, inst_obj.uuid, 'migrate_server', updates, exc_info, request_spec) rollback_mock.assert_called_once_with() def test_cold_migrate_no_valid_host_error_msg(self): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict()) filter_props = dict(context=None) resvs = 'fake-resvs' image = 'fake-image' with test.nested( mock.patch.object(utils, 'get_image_from_system_metadata', return_value=image), mock.patch.object(scheduler_utils, 'build_request_spec', return_value=request_spec), mock.patch.object(self.conductor, '_set_vm_state_and_notify'), mock.patch.object(migrate.MigrationTask, 'execute', side_effect=exc.NoValidHost(reason="")), mock.patch.object(migrate.MigrationTask, 'rollback') ) as (image_mock, brs_mock, set_vm_mock, task_execute_mock, task_rollback_mock): nvh = self.assertRaises(exc.NoValidHost, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) self.assertIn('cold migrate', nvh.message) @mock.patch.object(utils, 'get_image_from_system_metadata') @mock.patch('nova.scheduler.utils.build_request_spec') @mock.patch.object(migrate.MigrationTask, 'execute') @mock.patch.object(migrate.MigrationTask, 'rollback') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') def test_cold_migrate_no_valid_host_in_group(self, set_vm_mock, task_rollback_mock, task_exec_mock, brs_mock, image_mock): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict()) filter_props = dict(context=None) resvs = 'fake-resvs' image = 'fake-image' exception = exc.UnsupportedPolicyException(reason='') image_mock.return_value = image 
brs_mock.return_value = request_spec task_exec_mock.side_effect = exception self.assertRaises(exc.UnsupportedPolicyException, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) updates = {'vm_state': vm_states.STOPPED, 'task_state': None} set_vm_mock.assert_called_once_with(self.context, inst_obj.uuid, 'migrate_server', updates, exception, request_spec) @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(objects.RequestSpec, 'from_primitives') @mock.patch.object(utils, 'get_image_from_system_metadata') @mock.patch.object(objects.Quotas, 'from_reservations') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations') @mock.patch.object(conductor_manager.ComputeTaskManager, '_set_vm_state_and_notify') @mock.patch.object(migrate.MigrationTask, 'rollback') @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize') def test_cold_migrate_exception_host_in_error_state_and_raise( self, prep_resize_mock, rollback_mock, notify_mock, select_dest_mock, quotas_mock, metadata_mock, spec_fp_mock, sig_mock, brs_mock): flavor = flavors.get_flavor_by_name('m1.tiny') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') image = 'fake-image' request_spec = dict(instance_type=dict(), instance_properties=dict(), image=image) filter_props = dict(context=None) resvs = 'fake-resvs' fake_spec = objects.RequestSpec() hosts = [dict(host='host1', nodename=None, limits={})] metadata_mock.return_value = image brs_mock.return_value = request_spec spec_fp_mock.return_value = fake_spec exc_info = test.TestingException('something happened') select_dest_mock.return_value = hosts updates = {'vm_state': vm_states.STOPPED, 'task_state': None} prep_resize_mock.side_effect = exc_info self.assertRaises(test.TestingException, self.conductor._cold_migrate, self.context, inst_obj, flavor, filter_props, [resvs], clean_shutdown=True) metadata_mock.assert_called_with({}) brs_mock.assert_called_once_with(self.context, image, [inst_obj], instance_type=flavor) quotas_mock.assert_called_once_with(self.context, [resvs], instance=inst_obj) sig_mock.assert_called_once_with(self.context, request_spec, filter_props) select_dest_mock.assert_called_once_with( self.context, fake_spec) prep_resize_mock.assert_called_once_with( self.context, image, inst_obj, flavor, hosts[0]['host'], [resvs], request_spec=request_spec, filter_properties=filter_props, node=hosts[0]['nodename'], clean_shutdown=True) notify_mock.assert_called_once_with(self.context, inst_obj.uuid, 'migrate_server', updates, exc_info, request_spec) rollback_mock.assert_called_once_with() def test_resize_no_valid_host_error_msg(self): flavor = flavors.get_flavor_by_name('m1.tiny') flavor_new = flavors.get_flavor_by_name('m1.small') inst_obj = objects.Instance( image_ref='fake-image_ref', vm_state=vm_states.STOPPED, instance_type_id=flavor['id'], system_metadata={}, uuid='fake', user_id='fake') request_spec = dict(instance_type=dict(extra_specs=dict()), instance_properties=dict()) filter_props = dict(context=None) resvs = 'fake-resvs' image = 'fake-image' with test.nested( mock.patch.object(utils, 'get_image_from_system_metadata', return_value=image), mock.patch.object(scheduler_utils, 'build_request_spec', return_value=request_spec), mock.patch.object(self.conductor, '_set_vm_state_and_notify'), 
mock.patch.object(migrate.MigrationTask, 'execute', side_effect=exc.NoValidHost(reason="")), mock.patch.object(migrate.MigrationTask, 'rollback') ) as (image_mock, brs_mock, vm_st_mock, task_execute_mock, task_rb_mock): nvh = self.assertRaises(exc.NoValidHost, self.conductor._cold_migrate, self.context, inst_obj, flavor_new, filter_props, [resvs], clean_shutdown=True) self.assertIn('resize', nvh.message) def test_build_instances_instance_not_found(self): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] self.mox.StubOutWithMock(instances[0], 'refresh') self.mox.StubOutWithMock(instances[1], 'refresh') image = {'fake-data': 'should_pass_silently'} spec = {'fake': 'specs', 'instance_properties': instances[0]} self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec') self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances') self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi, 'build_and_run_instance') scheduler_utils.build_request_spec(self.context, image, mox.IgnoreArg()).AndReturn(spec) filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} self.conductor_manager._schedule_instances(self.context, spec, filter_properties).AndReturn( [{'host': 'host1', 'nodename': 'node1', 'limits': []}, {'host': 'host2', 'nodename': 'node2', 'limits': []}]) instances[0].refresh().AndRaise( exc.InstanceNotFound(instance_id=instances[0].uuid)) instances[1].refresh() self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=instances[1], host='host2', image={'fake-data': 'should_pass_silently'}, request_spec=spec, filter_properties={'limits': [], 'retry': {'num_attempts': 1, 'hosts': [['host2', 'node2']]}}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mox.IsA(objects.BlockDeviceMappingList), node='node2', limits=[]) self.mox.ReplayAll() # build_instances() is a cast, we need to wait for it to complete self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances(self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(scheduler_utils, 'build_request_spec') def test_build_instances_info_cache_not_found(self, build_request_spec, setup_instance_group): instances = [fake_instance.fake_instance_obj(self.context) for i in range(2)] image = {'fake-data': 'should_pass_silently'} destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []}, {'host': 'host2', 'nodename': 'node2', 'limits': []}] spec = {'fake': 'specs', 'instance_properties': instances[0]} build_request_spec.return_value = spec with test.nested( mock.patch.object(instances[0], 'refresh', side_effect=exc.InstanceInfoCacheNotFound( instance_uuid=instances[0].uuid)), mock.patch.object(instances[1], 'refresh'), mock.patch.object(objects.RequestSpec, 'from_primitives'), mock.patch.object(self.conductor_manager.scheduler_client, 'select_destinations', return_value=destinations), mock.patch.object(self.conductor_manager.compute_rpcapi, 'build_and_run_instance') ) as (inst1_refresh, inst2_refresh, from_primitives, select_destinations, build_and_run_instance): # build_instances() is a cast, we need to wait for it to complete 
self.useFixture(cast_as_call.CastAsCall(self.stubs)) self.conductor.build_instances(self.context, instances=instances, image=image, filter_properties={}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping='block_device_mapping', legacy_bdm=False) # NOTE(sbauza): Due to populate_retry() later in the code, # filter_properties is dynamically modified setup_instance_group.assert_called_once_with( self.context, spec, {'retry': {'num_attempts': 1, 'hosts': []}}) build_and_run_instance.assert_called_once_with(self.context, instance=instances[1], host='host2', image={'fake-data': 'should_pass_silently'}, request_spec=spec, filter_properties={'limits': [], 'retry': {'num_attempts': 1, 'hosts': [['host2', 'node2']]}}, admin_password='admin_password', injected_files='injected_files', requested_networks=None, security_groups='security_groups', block_device_mapping=mock.ANY, node='node2', limits=[]) class ConductorTaskRPCAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase): """Conductor compute_task RPC namespace Tests.""" def setUp(self): super(ConductorTaskRPCAPITestCase, self).setUp() self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor = conductor_rpcapi.ComputeTaskAPI() service_manager = self.conductor_service.manager self.conductor_manager = service_manager.compute_task_mgr class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase): """Compute task API Tests.""" def setUp(self): super(ConductorTaskAPITestCase, self).setUp() self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.conductor = conductor_api.ComputeTaskAPI() service_manager = self.conductor_service.manager self.conductor_manager = service_manager.compute_task_mgr class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase): """Conductor LocalComputeTaskAPI Tests.""" def setUp(self): super(ConductorLocalComputeTaskAPITestCase, self).setUp() self.conductor = conductor_api.LocalComputeTaskAPI() self.conductor_manager = self.conductor._manager._target nova-13.1.4/nova/tests/unit/conf_fixture.py0000664000567000056710000000671713064447152022056 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
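# A minimal usage sketch for the fixture defined below (a hypothetical test
# case, not part of this module): a test installs ConfFixture so that the
# test-friendly defaults it sets -- for example host='fake-mini' and the
# in-memory sqlite:// database connections -- are in effect while the test
# runs.
#
#     import testtools
#
#     class ExampleConfTest(testtools.TestCase):
#         def test_defaults_applied(self):
#             self.useFixture(ConfFixture(CONF))
#             self.assertEqual('fake-mini', CONF.host)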
from oslo_config import fixture as config_fixture from oslo_policy import opts as policy_opts import nova.conf from nova import config from nova import ipv6 from nova import paths from nova.tests.unit import utils CONF = nova.conf.CONF CONF.import_opt('use_ipv6', 'nova.netconf') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('fake_network', 'nova.network.linux_net') CONF.import_opt('network_size', 'nova.network.manager') CONF.import_opt('num_networks', 'nova.network.manager') CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips') CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips') class ConfFixture(config_fixture.Config): """Fixture to manage global conf settings.""" def setUp(self): super(ConfFixture, self).setUp() self.conf.set_default('api_paste_config', paths.state_path_def('etc/nova/api-paste.ini')) self.conf.set_default('host', 'fake-mini') self.conf.set_default('compute_driver', 'nova.virt.fake.SmallFakeDriver') self.conf.set_default('fake_network', True) self.conf.set_default('flat_network_bridge', 'br100') self.conf.set_default('floating_ip_dns_manager', 'nova.tests.unit.utils.dns_manager') self.conf.set_default('instance_dns_manager', 'nova.tests.unit.utils.dns_manager') self.conf.set_default('network_size', 8) self.conf.set_default('num_networks', 2) self.conf.set_default('use_ipv6', True) self.conf.set_default('vlan_interface', 'eth0') self.conf.set_default('auth_strategy', 'noauth2') config.parse_args([], default_config_files=[], configure_db=False, init_rpc=False) self.conf.set_default('connection', "sqlite://", group='database') self.conf.set_default('connection', "sqlite://", group='api_database') self.conf.set_default('sqlite_synchronous', False, group='database') self.conf.set_default('sqlite_synchronous', False, group='api_database') self.conf.set_default('fatal_exception_format_errors', True) self.conf.set_default('enabled', True, 'osapi_v21') # TODO(sdague): this makes our project_id match 'fake' as well. # We should fix the tests to use real # UUIDs then drop this work around. self.conf.set_default('project_id_regex', '[0-9a-fk\-]+', 'osapi_v21') self.conf.set_default('force_dhcp_release', False) self.conf.set_default('periodic_enable', False) policy_opts.set_defaults(self.conf) self.addCleanup(utils.cleanup_dns_managers) self.addCleanup(ipv6.api.reset_backend) nova-13.1.4/nova/tests/unit/test_api_validation.py0000664000567000056710000014276513064447152023411 0ustar jenkinsjenkins00000000000000# Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
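# The tests in this module exercise nova.api.validation.schema(), which
# checks a JSON request body against a JSON Schema before the decorated
# handler runs and turns schema violations into a ValidationError with an
# HTTP 400 code. The helper below is only a minimal, self-contained sketch
# of that pattern built on the plain `jsonschema` library; its name and
# behaviour are illustrative assumptions, not Nova's actual implementation.
import functools

import jsonschema


def _example_schema_validator(request_body_schema):
    """Return a decorator that validates ``body`` against a JSON Schema."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(req, body, *args, **kwargs):
            # Raises jsonschema.exceptions.ValidationError on bad input;
            # Nova's real decorator instead raises
            # nova.exception.ValidationError carrying the 400 code.
            jsonschema.validate(body, request_body_schema)
            return func(req, body, *args, **kwargs)
        return wrapper
    return decorator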
import copy import re import fixtures from jsonschema import exceptions as jsonschema_exc import six from nova.api.openstack import api_version_request as api_version from nova.api import validation from nova.api.validation import parameter_types from nova.api.validation import validators from nova import exception from nova import test class FakeRequest(object): api_version_request = api_version.APIVersionRequest("2.1") environ = {} legacy_v2 = False def is_legacy_v2(self): return self.legacy_v2 class ValidationRegex(test.NoDBTestCase): def test_cell_names(self): cellre = re.compile(parameter_types.valid_cell_name_regex.regex) self.assertTrue(cellre.search('foo')) self.assertFalse(cellre.search('foo.bar')) self.assertFalse(cellre.search('foo@bar')) self.assertFalse(cellre.search('foo!bar')) self.assertFalse(cellre.search(' foo!bar')) self.assertFalse(cellre.search('\nfoo!bar')) def test_build_regex_range(self): # this is much easier to think about if we only use the ascii # subset because it's a printable range we can think # about. The algorithm works for all ranges. def _get_all_chars(): for i in range(0x7F): yield six.unichr(i) self.useFixture(fixtures.MonkeyPatch( 'nova.api.validation.parameter_types._get_all_chars', _get_all_chars)) r = parameter_types._build_regex_range(ws=False) self.assertEqual(r, re.escape('!') + '-' + re.escape('~')) # if we allow whitespace the range starts earlier r = parameter_types._build_regex_range(ws=True) self.assertEqual(r, re.escape(' ') + '-' + re.escape('~')) # excluding a character will give us 2 ranges r = parameter_types._build_regex_range(ws=True, exclude=['A']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + '-' + re.escape('~')) # inverting which gives us all the initial unprintable characters. r = parameter_types._build_regex_range(ws=False, invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape(' ')) # excluding characters that create a singleton. Naively this would be: # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural. r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + 'D' + '-' + re.escape('~')) # ws=True means the positive regex has printable whitespaces, # so the inverse will not. The inverse will include things we # exclude. r = parameter_types._build_regex_range( ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ') class APIValidationTestCase(test.NoDBTestCase): def check_validation_error(self, method, body, expected_detail, req=None): if not req: req = FakeRequest() try: method(body=body, req=req,) except exception.ValidationError as ex: self.assertEqual(400, ex.kwargs['code']) if not re.match(expected_detail, ex.kwargs['detail']): self.assertEqual(expected_detail, ex.kwargs['detail'], 'Exception details did not match expected') except Exception as ex: self.fail('An unexpected exception happens: %s' % ex) else: self.fail('Any exception does not happen.') class FormatCheckerTestCase(test.NoDBTestCase): def test_format_checker_failed(self): format_checker = validators.FormatChecker() exc = self.assertRaises(jsonschema_exc.FormatError, format_checker.check, " ", "name") self.assertIsInstance(exc.cause, exception.InvalidName) self.assertEqual("An invalid 'name' value was provided. The name must " "be: printable characters. 
" "Can not start or end with whitespace.", exc.cause.format_message()) def test_format_checker_failed_with_non_string(self): checks = ["name", "name_with_leading_trailing_spaces", "cell_name", "cell_name_with_leading_trailing_spaces"] format_checker = validators.FormatChecker() for check in checks: exc = self.assertRaises(jsonschema_exc.FormatError, format_checker.check, None, "name") self.assertIsInstance(exc.cause, exception.InvalidName) self.assertEqual("An invalid 'name' value was provided. The name " "must be: printable characters. " "Can not start or end with whitespace.", exc.cause.format_message()) class MicroversionsSchemaTestCase(APIValidationTestCase): def setUp(self): super(MicroversionsSchemaTestCase, self).setUp() schema_v21_int = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', } } } schema_v20_str = copy.deepcopy(schema_v21_int) schema_v20_str['properties']['foo'] = {'type': 'string'} @validation.schema(schema_v20_str, '2.0', '2.0') @validation.schema(schema_v21_int, '2.1') def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_v2compatible_request(self): req = FakeRequest() req.legacy_v2 = True self.assertEqual(self.post(body={'foo': 'bar'}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: 1. " "1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail, req=req) def test_validate_v21_request(self): req = FakeRequest() self.assertEqual(self.post(body={'foo': 1}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: bar. " "'bar' is not of type 'integer'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail, req=req) def test_validate_v2compatible_request_with_none_min_version(self): schema_none = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer' } } } @validation.schema(schema_none) def post(req, body): return 'Validation succeeded.' req = FakeRequest() req.legacy_v2 = True self.assertEqual('Validation succeeded.', post(body={'foo': 1}, req=req)) detail = ("Invalid input for field/attribute foo. Value: bar. " "'bar' is not of type 'integer'") self.check_validation_error(post, body={'foo': 'bar'}, expected_detail=detail, req=req) class RequiredDisableTestCase(APIValidationTestCase): def setUp(self): super(RequiredDisableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_required_disable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'abc': 1}, req=FakeRequest()), 'Validation succeeded.') class RequiredEnableTestCase(APIValidationTestCase): def setUp(self): super(RequiredEnableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'] } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_required_enable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') def test_validate_required_enable_fails(self): detail = "'foo' is a required property" self.check_validation_error(self.post, body={'abc': 1}, expected_detail=detail) class AdditionalPropertiesEnableTestCase(APIValidationTestCase): def setUp(self): super(AdditionalPropertiesEnableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_additionalProperties_enable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': 1, 'ext': 1}, req=FakeRequest()), 'Validation succeeded.') class AdditionalPropertiesDisableTestCase(APIValidationTestCase): def setUp(self): super(AdditionalPropertiesDisableTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], 'additionalProperties': False, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_additionalProperties_disable(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') def test_validate_additionalProperties_disable_fails(self): detail = "Additional properties are not allowed ('ext' was unexpected)" self.check_validation_error(self.post, body={'foo': 1, 'ext': 1}, expected_detail=detail) class PatternPropertiesTestCase(APIValidationTestCase): def setUp(self): super(PatternPropertiesTestCase, self).setUp() schema = { 'patternProperties': { '^[a-zA-Z0-9]{1,10}$': { 'type': 'string' }, }, 'additionalProperties': False, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_patternProperties(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'bar'}, req=FakeRequest())) def test_validate_patternProperties_fails(self): detail = "Additional properties are not allowed ('__' was unexpected)" self.check_validation_error(self.post, body={'__': 'bar'}, expected_detail=detail) detail = "Additional properties are not allowed ('' was unexpected)" self.check_validation_error(self.post, body={'': 'bar'}, expected_detail=detail) detail = ("Additional properties are not allowed ('0123456789a' was" " unexpected)") self.check_validation_error(self.post, body={'0123456789a': 'bar'}, expected_detail=detail) detail = "expected string or buffer" self.check_validation_error(self.post, body={None: 'bar'}, expected_detail=detail) class StringTestCase(APIValidationTestCase): def setUp(self): super(StringTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_string(self): self.assertEqual(self.post(body={'foo': 'abc'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': ''}, req=FakeRequest()), 'Validation succeeded.') def test_validate_string_fails(self): detail = ("Invalid input for field/attribute foo. Value: 1." 
" 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.5." " 1.5 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1.5}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) class StringLengthTestCase(APIValidationTestCase): def setUp(self): super(StringLengthTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'minLength': 1, 'maxLength': 10, }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_string_length(self): self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0123456789'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_string_length_fails(self): detail = ("Invalid input for field/attribute foo. Value: ." " '' is too short") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0123456789a." " '0123456789a' is too long") self.check_validation_error(self.post, body={'foo': '0123456789a'}, expected_detail=detail) class IntegerTestCase(APIValidationTestCase): def setUp(self): super(IntegerTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_integer(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '0123456789'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_integer_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0xffff." " '0xffff' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '0xffff'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " 1.0 is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': 1.0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " '1.0' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '1.0'}, expected_detail=detail) class IntegerRangeTestCase(APIValidationTestCase): def setUp(self): super(IntegerRangeTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1, 'maximum': 10, }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def test_validate_integer_range(self): self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': 10}, req=FakeRequest()), 'Validation succeeded.') self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()), 'Validation succeeded.') def test_validate_integer_range_fails(self): detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': 0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': 11}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': '0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': '11'}, expected_detail=detail) class BooleanTestCase(APIValidationTestCase): def setUp(self): super(BooleanTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.boolean, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_boolean(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': True}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': False}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'True'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'False'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest())) def test_validate_boolean_fails(self): enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On'," " 'on', 'YES', 'Yes', 'yes'," " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off'," " 'off', 'NO', 'No', 'no']") detail = ("Invalid input for field/attribute foo. Value: bar." " 'bar' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 2." " '2' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': '2'}, expected_detail=detail) class HostnameTestCase(APIValidationTestCase): def setUp(self): super(HostnameTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.hostname, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_hostname(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost.localdomain.com'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my-host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my_host'}, req=FakeRequest())) def test_validate_hostname_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." 
" True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: my$host." " 'my$host' does not match '^[a-zA-Z0-9-._]*$'") self.check_validation_error(self.post, body={'foo': 'my$host'}, expected_detail=detail) class HostnameIPaddressTestCase(APIValidationTestCase): def setUp(self): super(HostnameIPaddressTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.hostname_or_ip_address, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_hostname_or_ip_address(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'localhost.localdomain.com'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my-host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my_host'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '192.168.10.100'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '2001:db8::9abc'}, req=FakeRequest())) def test_validate_hostname_or_ip_address_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: my$host." " 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'") self.check_validation_error(self.post, body={'foo': 'my$host'}, expected_detail=detail) class CellNameTestCase(APIValidationTestCase): def setUp(self): super(CellNameTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.cell_name, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters except !, ., @. 
" "Can not start or end with whitespace.") should_fail = (' ', ' server', 'server ', u'a\xa0', # trailing unicode space u'\uffff', # non-printable unicode 'abc!def', 'abc.def', 'abc@def') for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class CellNameLeadingTrailingSpacesTestCase(APIValidationTestCase): def setUp(self): super(CellNameLeadingTrailingSpacesTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.cell_name_leading_trailing_spaces, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server '}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters except !, ., @, " "with at least one non space character") should_fail = ( ' ', u'\uffff', # non-printable unicode 'abc!def', 'abc.def', 'abc@def') for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NameTestCase(APIValidationTestCase): def setUp(self): super(NameTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'm1.small'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'a'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters. 
" "Can not start or end with whitespace.") should_fail = (' ', ' server', 'server ', u'a\xa0', # trailing unicode space u'\uffff', # non-printable unicode ) for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NameWithLeadingTrailingSpacesTestCase(APIValidationTestCase): def setUp(self): super(NameWithLeadingTrailingSpacesTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name_with_leading_trailing_spaces, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'm1.small'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'my server'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'a'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' abc '}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc abc abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ' abc abc abc '}, req=FakeRequest())) # leading unicode space self.assertEqual('Validation succeeded.', self.post(body={'foo': '\xa0abc'}, req=FakeRequest())) def test_validate_name_fails(self): error = ("An invalid 'name' value was provided. The name must be: " "printable characters with at least one non space character") should_fail = ( ' ', u'\xa0', # unicode space u'\uffff', # non-printable unicode ) for item in should_fail: self.check_validation_error(self.post, body={'foo': item}, expected_detail=error) # four-byte unicode, if supported by this python build try: self.check_validation_error(self.post, body={'foo': u'\U00010000'}, expected_detail=error) except ValueError: pass class NoneTypeTestCase(APIValidationTestCase): def setUp(self): super(NoneTypeTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.none } } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_none(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'None'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': None}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': {}}, req=FakeRequest())) def test_validate_none_fails(self): detail = ("Invalid input for field/attribute foo. Value: ." " '' is not one of ['None', None, {}]") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: " "{'key': 'val'}. 
{'key': 'val'} is not one of " "['None', None, {}]") self.check_validation_error(self.post, body={'foo': {'key': 'val'}}, expected_detail=detail) class TcpUdpPortTestCase(APIValidationTestCase): def setUp(self): super(TcpUdpPortTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': parameter_types.tcp_udp_port, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_tcp_udp_port(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1024}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1024'}, req=FakeRequest())) def test_validate_tcp_udp_port_fails(self): detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 65536." " 65536(.0)? is greater than the maximum of 65535") self.check_validation_error(self.post, body={'foo': 65536}, expected_detail=detail) class CidrFormatTestCase(APIValidationTestCase): def setUp(self): super(CidrFormatTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'cidr', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_cidr(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '192.168.10.0/24'}, req=FakeRequest() )) def test_validate_cidr_fails(self): detail = ("Invalid input for field/attribute foo." " Value: bar." " 'bar' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: . '' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.1.0. '192.168.1.0' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': '192.168.1.0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.1.0 /24." " '192.168.1.0 /24' is not a 'cidr'") self.check_validation_error(self.post, body={'foo': '192.168.1.0 /24'}, expected_detail=detail) class DatetimeTestCase(APIValidationTestCase): def setUp(self): super(DatetimeTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'date-time', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_datetime(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '2014-01-14T01:00:00Z'}, req=FakeRequest() )) def test_validate_datetime_fails(self): detail = ("Invalid input for field/attribute foo." " Value: 2014-13-14T01:00:00Z." " '2014-13-14T01:00:00Z' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': '2014-13-14T01:00:00Z'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: bar. 'bar' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." 
" '1' is not a 'date-time'") self.check_validation_error(self.post, body={'foo': '1'}, expected_detail=detail) class UuidTestCase(APIValidationTestCase): def setUp(self): super(UuidTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'uuid', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_uuid(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'}, req=FakeRequest() )) def test_validate_uuid_fails(self): detail = ("Invalid input for field/attribute foo." " Value: 70a599e031e749b7b260868f441e862." " '70a599e031e749b7b260868f441e862' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': '70a599e031e749b7b260868f441e862'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1." " '1' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': '1'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'uuid'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) class UriTestCase(APIValidationTestCase): def setUp(self): super(UriTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'uri', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_uri(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': 'http://localhost:8774/v2/servers'}, req=FakeRequest() )) self.assertEqual('Validation succeeded.', self.post( body={'foo': 'http://[::1]:8774/v2/servers'}, req=FakeRequest() )) def test_validate_uri_fails(self): base_detail = ("Invalid input for field/attribute foo. Value: {0}. " "'{0}' is not a 'uri'") invalid_uri = 'http://localhost:8774/v2/servers##' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = 'http://[fdf8:01]:8774/v2/servers' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = '1' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) invalid_uri = 'abc' self.check_validation_error(self.post, body={'foo': invalid_uri}, expected_detail=base_detail.format( invalid_uri)) class Ipv4TestCase(APIValidationTestCase): def setUp(self): super(Ipv4TestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'ipv4', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_ipv4(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '192.168.0.100'}, req=FakeRequest() )) def test_validate_ipv4_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: localhost." " 'localhost' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': 'localhost'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 2001:db8::1234:0:0:9abc." 
" '2001:db8::1234:0:0:9abc' is not a 'ipv4'") self.check_validation_error(self.post, body={'foo': '2001:db8::1234:0:0:9abc'}, expected_detail=detail) class Ipv6TestCase(APIValidationTestCase): def setUp(self): super(Ipv6TestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'ipv6', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_ipv6(self): self.assertEqual('Validation succeeded.', self.post( body={'foo': '2001:db8::1234:0:0:9abc'}, req=FakeRequest() )) def test_validate_ipv6_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: localhost." " 'localhost' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': 'localhost'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo." " Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'") self.check_validation_error(self.post, body={'foo': '192.168.0.100'}, expected_detail=detail) class Base64TestCase(APIValidationTestCase): def setUp(self): super(APIValidationTestCase, self).setUp() schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'base64', }, }, } @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_base64(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'aGVsbG8gd29ybGQ='}, req=FakeRequest())) # 'aGVsbG8gd29ybGQ=' is the base64 code of 'hello world' def test_validate_base64_fails(self): value = 'A random string' detail = ("Invalid input for field/attribute foo. " "Value: %s. '%s' is not a 'base64'") % (value, value) self.check_validation_error(self.post, body={'foo': value}, expected_detail=detail) nova-13.1.4/nova/tests/unit/fake_ldap.py0000664000567000056710000002206113064447152021257 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap library to work with nova. 
""" import fnmatch from oslo_serialization import jsonutils import six from six.moves import range class Store(object): def __init__(self): if hasattr(self.__class__, '_instance'): raise Exception('Attempted to instantiate singleton') @classmethod def instance(cls): if not hasattr(cls, '_instance'): cls._instance = _StorageDict() return cls._instance class _StorageDict(dict): def keys(self, pat=None): ret = super(_StorageDict, self).keys() if pat is not None: ret = fnmatch.filter(ret, pat) return ret def delete(self, key): try: del self[key] except KeyError: pass def flushdb(self): self.clear() def hgetall(self, key): """Returns the hash for the given key Creates the hash if the key doesn't exist. """ try: return self[key] except KeyError: self[key] = {} return self[key] def hget(self, key, field): hashdict = self.hgetall(key) try: return hashdict[field] except KeyError: hashdict[field] = {} return hashdict[field] def hset(self, key, field, val): hashdict = self.hgetall(key) hashdict[field] = val def hmset(self, key, value_dict): hashdict = self.hgetall(key) for field, val in value_dict.items(): hashdict[field] = val SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 MOD_ADD = 0 MOD_DELETE = 1 MOD_REPLACE = 2 class NO_SUCH_OBJECT(Exception): """Duplicate exception class from real LDAP module.""" pass class OBJECT_CLASS_VIOLATION(Exception): """Duplicate exception class from real LDAP module.""" pass class SERVER_DOWN(Exception): """Duplicate exception class from real LDAP module.""" pass def initialize(_uri): """Opens a fake connection with an LDAP server.""" return FakeLDAP() def _match_query(query, attrs): """Match an ldap query to an attribute dictionary. The characters &, |, and ! are supported in the query. No syntax checking is performed, so malformed queries will not work correctly. """ # cut off the parentheses inner = query[1:-1] if inner.startswith('&'): # cut off the & l, r = _paren_groups(inner[1:]) return _match_query(l, attrs) and _match_query(r, attrs) if inner.startswith('|'): # cut off the | l, r = _paren_groups(inner[1:]) return _match_query(l, attrs) or _match_query(r, attrs) if inner.startswith('!'): # cut off the ! and the nested parentheses return not _match_query(query[2:-1], attrs) (k, _sep, v) = inner.partition('=') return _match(k, v, attrs) def _paren_groups(source): """Split a string into parenthesized groups.""" count = 0 start = 0 result = [] for pos in range(len(source)): if source[pos] == '(': if count == 0: start = pos count += 1 if source[pos] == ')': count -= 1 if count == 0: result.append(source[start:pos + 1]) return result def _match(key, value, attrs): """Match a given key and value against an attribute list.""" if key not in attrs: return False # This is a wild card search. Implemented as all or nothing for now. if value == "*": return True if key != "objectclass": return value in attrs[key] # it is an objectclass check, so check subclasses values = _subs(value) for v in values: if v in attrs[key]: return True return False def _subs(value): """Returns a list of subclass strings. The strings represent the ldap object class plus any subclasses that inherit from it. Fakeldap doesn't know about the ldap object structure, so subclasses need to be defined manually in the dictionary below. """ subs = {'groupOfNames': ['novaProject']} if value in subs: return [value] + subs[value] return [value] def _from_json(encoded): """Convert attribute values from json representation. 
Args: encoded -- a json encoded string Returns a list of strings """ return [str(x) for x in jsonutils.loads(encoded)] def _to_json(unencoded): """Convert attribute values into json representation. Args: unencoded -- an unencoded string or list of strings. If it is a single string, it will be converted into a list. Returns a json string """ return jsonutils.dumps(list(unencoded)) server_fail = False class FakeLDAP(object): """Fake LDAP connection.""" def simple_bind_s(self, dn, password): """This method is ignored, but provided for compatibility.""" if server_fail: raise SERVER_DOWN() pass def unbind_s(self): """This method is ignored, but provided for compatibility.""" if server_fail: raise SERVER_DOWN() pass def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" if server_fail: raise SERVER_DOWN() key = "%s%s" % (self.__prefix, dn) value_dict = {k: _to_json(v) for k, v in attr} Store.instance().hmset(key, value_dict) def delete_s(self, dn): """Remove the ldap object at specified dn.""" if server_fail: raise SERVER_DOWN() Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. :param dn: a dn :param attrs: a list of tuples in the following form:: ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ if server_fail: raise SERVER_DOWN() store = Store.instance() key = "%s%s" % (self.__prefix, dn) for cmd, k, v in attrs: values = _from_json(store.hget(key, k)) if cmd == MOD_ADD: values.append(v) elif cmd == MOD_REPLACE: values = [v] else: values.remove(v) store.hset(key, k, _to_json(values)) def modrdn_s(self, dn, newrdn): oldobj = self.search_s(dn, SCOPE_BASE) if not oldobj: raise NO_SUCH_OBJECT() newdn = "%s,%s" % (newrdn, dn.partition(',')[2]) newattrs = oldobj[0][1] modlist = [] for attrtype in newattrs.keys(): modlist.append((attrtype, newattrs[attrtype])) self.add_s(newdn, modlist) self.delete_s(dn) def search_s(self, dn, scope, query=None, fields=None): """Search for all matching objects under dn using the query. Args: dn -- dn to search under scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported query -- query to filter objects by fields -- fields to return. 
Returns all fields if not specified """ if server_fail: raise SERVER_DOWN() if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) store = Store.instance() if scope == SCOPE_BASE: pattern = "%s%s" % (self.__prefix, dn) keys = store.keys(pattern) else: keys = store.keys("%s*%s" % (self.__prefix, dn)) if not keys: raise NO_SUCH_OBJECT() objects = [] for key in keys: # get the attributes from the store attrs = store.hgetall(key) # turn the values from the store into lists attrs = {k: _from_json(v) for k, v in six.iteritems(attrs)} # filter the objects by query if not query or _match_query(query, attrs): # filter the attributes by fields attrs = {k: v for k, v in six.iteritems(attrs) if not fields or k in fields} objects.append((key[len(self.__prefix):], attrs)) return objects @property def __prefix(self): """Get the prefix to use for all keys.""" return 'ldap:' nova-13.1.4/nova/tests/unit/image/0000775000567000056710000000000013064447471020064 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/image/rel.tar.gz0000664000567000056710000000024513064447140021767 0ustar jenkinsjenkins00000000000000‹,«ØNíÒA Â0…á¬=ENÐdj2=O]tW"1¢ÞÞv!H±$ âÿ˜ÅòàeH¹ÜÊ%¹¦™OnHÉ™šü¤ëâ<¥‹þy>U•6жÆË¾•`l¬šbÅùTúl­)åºyïÝþG ¯û?ô¹ÞsÁªa£ÿ°ì_§ïb}½ëþ¼ÿ1¥Ý·3øÌõB;<(nova-13.1.4/nova/tests/unit/image/test_fake.py0000664000567000056710000001106713064447152022404 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
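# The cases below exercise nova.tests.unit.image.fake.FakeImageService, the
# in-memory stand-in for Glance used by the unit tests: image listing via
# detail(), show(), create()/update()/delete(), and a data round trip via
# download(). Shared state is cleared with FakeImageService_reset() in
# tearDown(). A minimal usage sketch of the fake service as exercised below:
#
#     service = nova.tests.unit.image.fake.FakeImageService()
#     service.create(ctx, {'id': '34', 'foo': 'bar'})
#     image = service.show(ctx, '34')   # raises ImageNotFound for unknown ids
#     nova.tests.unit.image.fake.FakeImageService_reset()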
import datetime from six.moves import StringIO from nova import context from nova import exception from nova import test import nova.tests.unit.image.fake class FakeImageServiceTestCase(test.NoDBTestCase): def setUp(self): super(FakeImageServiceTestCase, self).setUp() self.image_service = nova.tests.unit.image.fake.FakeImageService() self.context = context.get_admin_context() def tearDown(self): super(FakeImageServiceTestCase, self).tearDown() nova.tests.unit.image.fake.FakeImageService_reset() def test_detail(self): res = self.image_service.detail(self.context) for image in res: keys = set(image.keys()) self.assertEqual(keys, set(['id', 'name', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'status', 'is_public', 'properties', 'disk_format', 'container_format', 'size'])) self.assertIsInstance(image['created_at'], datetime.datetime) self.assertIsInstance(image['updated_at'], datetime.datetime) if not (isinstance(image['deleted_at'], datetime.datetime) or image['deleted_at'] is None): self.fail('image\'s "deleted_at" attribute was neither a ' 'datetime object nor None') def check_is_bool(image, key): val = image.get('deleted') if not isinstance(val, bool): self.fail('image\'s "%s" attribute wasn\'t ' 'a bool: %r' % (key, val)) check_is_bool(image, 'deleted') check_is_bool(image, 'is_public') def test_show_raises_imagenotfound_for_invalid_id(self): self.assertRaises(exception.ImageNotFound, self.image_service.show, self.context, 'this image does not exist') def test_create_adds_id(self): index = self.image_service.detail(self.context) image_count = len(index) self.image_service.create(self.context, {}) index = self.image_service.detail(self.context) self.assertEqual(len(index), image_count + 1) self.assertTrue(index[0]['id']) def test_create_keeps_id(self): self.image_service.create(self.context, {'id': '34'}) self.image_service.show(self.context, '34') def test_create_rejects_duplicate_ids(self): self.image_service.create(self.context, {'id': '34'}) self.assertRaises(exception.CouldNotUploadImage, self.image_service.create, self.context, {'id': '34'}) # Make sure there's still one left self.image_service.show(self.context, '34') def test_update(self): self.image_service.create(self.context, {'id': '34', 'foo': 'bar'}) self.image_service.update(self.context, '34', {'id': '34', 'foo': 'baz'}) img = self.image_service.show(self.context, '34') self.assertEqual(img['foo'], 'baz') def test_delete(self): self.image_service.create(self.context, {'id': '34', 'foo': 'bar'}) self.image_service.delete(self.context, '34') self.assertRaises(exception.NotFound, self.image_service.show, self.context, '34') def test_create_then_get(self): blob = 'some data' s1 = StringIO(blob) self.image_service.create(self.context, {'id': '32', 'foo': 'bar'}, data=s1) s2 = StringIO() self.image_service.download(self.context, '32', data=s2) self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact') nova-13.1.4/nova/tests/unit/image/abs.tar.gz0000664000567000056710000000023113064447140021745 0ustar jenkinsjenkins00000000000000‹‹ªØNíÑA Â0…á¬=En™šLÎSîJ¤FðøšE¥¸hA"þßf30^¨Ó%œK ®yÊ9µ©9Éz.œª™éÔ'*míSÇL/·kgï]­÷Í»½ý Kÿ§qîõ£l7úoýk<ªóÒ+ÐÚŸ÷?•røvŸyQä‡ (nova-13.1.4/nova/tests/unit/image/test_glance.py0000664000567000056710000017021613064447152022731 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from six.moves import StringIO import cryptography import glanceclient.exc import mock from oslo_config import cfg from oslo_service import sslutils from oslo_utils import netutils import six import testtools from nova import context from nova import exception from nova.image import glance from nova import test CONF = cfg.CONF NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" class tzinfo(datetime.tzinfo): @staticmethod def utcoffset(*args, **kwargs): return datetime.timedelta() NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo()) class TestConversions(test.NoDBTestCase): def test_convert_timestamps_to_datetimes(self): fixture = {'name': None, 'properties': {}, 'status': None, 'is_public': None, 'created_at': NOW_GLANCE_FORMAT, 'updated_at': NOW_GLANCE_FORMAT, 'deleted_at': NOW_GLANCE_FORMAT} result = glance._convert_timestamps_to_datetimes(fixture) self.assertEqual(result['created_at'], NOW_DATETIME) self.assertEqual(result['updated_at'], NOW_DATETIME) self.assertEqual(result['deleted_at'], NOW_DATETIME) def _test_extracting_missing_attributes(self, include_locations, size_attr=True): # Verify behavior from glance objects that are missing attributes # TODO(jaypipes): Find a better way of testing this crappy # glanceclient magic object stuff. class MyFakeGlanceImage(object): def __init__(self, metadata): IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at', 'updated_at', 'status', 'min_disk', 'min_ram', 'is_public'] if not size_attr: IMAGE_ATTRIBUTES.pop(0) raw = dict.fromkeys(IMAGE_ATTRIBUTES) raw.update(metadata) self.__dict__['raw'] = raw def __getattr__(self, key): try: return self.__dict__['raw'][key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): try: self.__dict__['raw'][key] = value except KeyError: raise AttributeError(key) metadata = { 'id': 1, 'created_at': NOW_DATETIME, 'updated_at': NOW_DATETIME, } image = MyFakeGlanceImage(metadata) observed = glance._extract_attributes( image, include_locations=include_locations) expected = { 'id': 1, 'name': None, 'is_public': None, 'size': 0, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': NOW_DATETIME, 'updated_at': NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None } if include_locations: expected['locations'] = None expected['direct_url'] = None self.assertEqual(expected, observed) def test_extracting_missing_attributes_include_locations(self): self._test_extracting_missing_attributes(include_locations=True) def test_extracting_missing_attributes_exclude_locations(self): self._test_extracting_missing_attributes(include_locations=False) def test_extracting_missing_attributes_exclude_size(self): # If image status is 'queued', 'size' attribute will be excluded # by glance v1 API. 
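# (The expected dict built by the helper above already defaults 'size' to 0,
# so the extraction code is expected to fill in 0 when glance omits the
# attribute for a queued image.)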
self._test_extracting_missing_attributes(include_locations=False, size_attr=False) class TestExceptionTranslations(test.NoDBTestCase): def test_client_forbidden_to_imagenotauthed(self): in_exc = glanceclient.exc.Forbidden('123') out_exc = glance._translate_image_exception('123', in_exc) self.assertIsInstance(out_exc, exception.ImageNotAuthorized) def test_client_httpforbidden_converts_to_imagenotauthed(self): in_exc = glanceclient.exc.HTTPForbidden('123') out_exc = glance._translate_image_exception('123', in_exc) self.assertIsInstance(out_exc, exception.ImageNotAuthorized) def test_client_notfound_converts_to_imagenotfound(self): in_exc = glanceclient.exc.NotFound('123') out_exc = glance._translate_image_exception('123', in_exc) self.assertIsInstance(out_exc, exception.ImageNotFound) def test_client_httpnotfound_converts_to_imagenotfound(self): in_exc = glanceclient.exc.HTTPNotFound('123') out_exc = glance._translate_image_exception('123', in_exc) self.assertIsInstance(out_exc, exception.ImageNotFound) class TestGlanceSerializer(test.NoDBTestCase): def test_serialize(self): metadata = {'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': { 'prop1': 'propvalue1', 'mappings': [ {'virtual': 'aaa', 'device': 'bbb'}, {'virtual': 'xxx', 'device': 'yyy'}], 'block_device_mapping': [ {'virtual_device': 'fake', 'device_name': '/dev/fake'}, {'virtual_device': 'ephemeral0', 'device_name': '/dev/fake0'}]}} # NOTE(tdurakov): Assertion of serialized objects won't work # during using of random PYTHONHASHSEED. Assertion of # serialized/deserialized object and initial one is enough converted = glance._convert_to_string(metadata) self.assertEqual(glance._convert_from_string(converted), metadata) class TestGetImageService(test.NoDBTestCase): @mock.patch.object(glance.GlanceClientWrapper, '__init__', return_value=None) def test_get_remote_service_from_id(self, gcwi_mocked): id_or_uri = '123' _ignored, image_id = glance.get_remote_image_service( mock.sentinel.ctx, id_or_uri) self.assertEqual(id_or_uri, image_id) gcwi_mocked.assert_called_once_with() @mock.patch.object(glance.GlanceClientWrapper, '__init__', return_value=None) def test_get_remote_service_from_href(self, gcwi_mocked): id_or_uri = 'http://127.0.0.1/v1/images/123' _ignored, image_id = glance.get_remote_image_service( mock.sentinel.ctx, id_or_uri) self.assertEqual('123', image_id) gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx, endpoint='http://127.0.0.1') class TestCreateGlanceClient(test.NoDBTestCase): @mock.patch('oslo_utils.netutils.is_valid_ipv6') @mock.patch('glanceclient.Client') def test_headers_passed_glanceclient(self, init_mock, ipv6_mock): self.flags(auth_strategy='keystone') ipv6_mock.return_value = False auth_token = 'token' ctx = context.RequestContext('fake', 'fake', auth_token=auth_token) expected_endpoint = 'http://host4:9295' expected_params = { 'identity_headers': { 'X-Auth-Token': 'token', 'X-User-Id': 'fake', 'X-Roles': '', 'X-Tenant-Id': 'fake', 'X-Identity-Status': 'Confirmed' } } glance._glanceclient_from_endpoint(ctx, expected_endpoint) init_mock.assert_called_once_with('1', expected_endpoint, **expected_params) # Test the version is properly passed to glanceclient. 
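# Reset both mocks between sub-cases so the assert_called_once_with checks
# below only see the most recent _glanceclient_from_endpoint call.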
ipv6_mock.reset_mock() init_mock.reset_mock() expected_endpoint = 'http://host4:9295' expected_params = { 'identity_headers': { 'X-Auth-Token': 'token', 'X-User-Id': 'fake', 'X-Roles': '', 'X-Tenant-Id': 'fake', 'X-Identity-Status': 'Confirmed' } } glance._glanceclient_from_endpoint(ctx, expected_endpoint, version=2) init_mock.assert_called_once_with('2', expected_endpoint, **expected_params) # Test that the IPv6 bracketization adapts the endpoint properly. ipv6_mock.reset_mock() init_mock.reset_mock() ipv6_mock.return_value = True expected_endpoint = 'http://[host4]:9295' glance._glanceclient_from_endpoint(ctx, expected_endpoint) init_mock.assert_called_once_with('1', expected_endpoint, **expected_params) class TestGlanceClientWrapper(test.NoDBTestCase): @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_static_client_without_retries(self, create_client_mock, sleep_mock): client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock self.flags(num_retries=0, group='glance') ctx = context.RequestContext('fake', 'fake') host = 'host4' port = 9295 endpoint = 'http://%s:%s' % (host, port) client = glance.GlanceClientWrapper(context=ctx, endpoint=endpoint) create_client_mock.assert_called_once_with(ctx, mock.ANY, 1) self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertFalse(sleep_mock.called) @mock.patch('nova.image.glance.LOG') @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_static_client_with_retries_negative(self, create_client_mock, sleep_mock, mock_log): client_mock = mock.Mock(spec=glanceclient.Client) images_mock = mock.Mock() images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable client_mock.images = images_mock create_client_mock.return_value = client_mock self.flags(num_retries=-1, group='glance') ctx = context.RequestContext('fake', 'fake') host = 'host4' port = 9295 endpoint = 'http://%s:%s' % (host, port) client = glance.GlanceClientWrapper(context=ctx, endpoint=endpoint) create_client_mock.assert_called_once_with(ctx, mock.ANY, 1) self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertTrue(mock_log.warning.called) msg = mock_log.warning.call_args_list[0] self.assertIn('Treating negative config value', msg[0][0]) self.assertFalse(sleep_mock.called) @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_static_client_with_retries(self, create_client_mock, sleep_mock): self.flags(num_retries=1, group='glance') client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = [ glanceclient.exc.ServiceUnavailable, None ] type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock ctx = context.RequestContext('fake', 'fake') host = 'host4' port = 9295 endpoint = 'http://%s:%s' % (host, port) client = glance.GlanceClientWrapper(context=ctx, endpoint=endpoint) client.call(ctx, 1, 'get', 'meow') sleep_mock.assert_called_once_with(1) @mock.patch('random.shuffle') @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_default_client_without_retries(self, create_client_mock, sleep_mock, shuffle_mock): api_servers = [ 'host1:9292', 'https://host2:9293', 
'http://host3:9294' ] client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock shuffle_mock.return_value = api_servers self.flags(num_retries=0, group='glance') self.flags(api_servers=api_servers, group='glance') # Here we are testing the behaviour that calling client.call() twice # when there are no retries will cycle through the api_servers and not # sleep (which would be an indication of a retry) ctx = context.RequestContext('fake', 'fake') client = glance.GlanceClientWrapper() self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertEqual(str(client.api_server), "http://host1:9292") self.assertFalse(sleep_mock.called) self.assertRaises(exception.GlanceConnectionFailed, client.call, ctx, 1, 'get', 'meow') self.assertEqual(str(client.api_server), "https://host2:9293") self.assertFalse(sleep_mock.called) @mock.patch('random.shuffle') @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_default_client_with_retries(self, create_client_mock, sleep_mock, shuffle_mock): api_servers = [ 'host1:9292', 'https://host2:9293', 'http://host3:9294' ] client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.get.side_effect = [ glanceclient.exc.ServiceUnavailable, None ] type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock self.flags(num_retries=1, group='glance') self.flags(api_servers=api_servers, group='glance') ctx = context.RequestContext('fake', 'fake') # And here we're testing that if num_retries is not 0, then we attempt # to retry the same connection action against the next client. 
client = glance.GlanceClientWrapper() client.call(ctx, 1, 'get', 'meow') self.assertEqual(str(client.api_server), "https://host2:9293") sleep_mock.assert_called_once_with(1) @mock.patch('random.shuffle') @mock.patch('time.sleep') @mock.patch('nova.image.glance._glanceclient_from_endpoint') def test_retry_works_with_generators(self, create_client_mock, sleep_mock, shuffle_mock): def some_generator(exception): if exception: raise glanceclient.exc.CommunicationError('Boom!') yield 'something' api_servers = [ 'https://host2:9292', 'https://host2:9293', 'http://host3:9294' ] client_mock = mock.MagicMock() images_mock = mock.MagicMock() images_mock.list.side_effect = [ some_generator(exception=True), some_generator(exception=False), ] type(client_mock).images = mock.PropertyMock(return_value=images_mock) create_client_mock.return_value = client_mock self.flags(num_retries=1, group='glance') self.flags(api_servers=api_servers, group='glance') ctx = context.RequestContext('fake', 'fake') client = glance.GlanceClientWrapper() client.call(ctx, 1, 'list', 'meow') sleep_mock.assert_called_once_with(1) self.assertEqual(str(client.api_server), 'https://host2:9293') @mock.patch('oslo_service.sslutils.is_enabled') @mock.patch('glanceclient.Client') def test_create_glance_client_with_ssl(self, client_mock, ssl_enable_mock): sslutils.register_opts(CONF) self.flags(ca_file='foo.cert', cert_file='bar.cert', key_file='wut.key', group='ssl') ctxt = mock.sentinel.ctx glance._glanceclient_from_endpoint(ctxt, 'https://host4:9295') client_mock.assert_called_once_with( '1', 'https://host4:9295', insecure=False, ssl_compression=False, cert_file='bar.cert', key_file='wut.key', cacert='foo.cert', identity_headers=mock.ANY) @mock.patch.object(glanceclient.common.http.HTTPClient, 'get') def test_determine_curr_major_version(self, http_client_mock): result = ("http://host1:9292/v2/", {'versions': [ {'status': 'CURRENT', 'id': 'v2.3'}, {'status': 'SUPPORTED', 'id': 'v1.0'}]}) http_client_mock.return_value = result maj_ver = glance._determine_curr_major_version('http://host1:9292') self.assertEqual(2, maj_ver) @mock.patch.object(glanceclient.common.http.HTTPClient, 'get') def test_determine_curr_major_version_invalid(self, http_client_mock): result = ("http://host1:9292/v2/", "Invalid String") http_client_mock.return_value = result curr_major_version = glance._determine_curr_major_version('abc') self.assertIsNone(curr_major_version) @mock.patch.object(glanceclient.common.http.HTTPClient, 'get') def test_determine_curr_major_version_unsupported(self, http_client_mock): result = ("http://host1:9292/v2/", {'versions': [ {'status': 'CURRENT', 'id': 'v666.0'}, {'status': 'SUPPORTED', 'id': 'v1.0'}]}) http_client_mock.return_value = result maj_ver = glance._determine_curr_major_version('http://host1:9292') self.assertIsNone(maj_ver) class TestDownloadNoDirectUri(test.NoDBTestCase): """Tests the download method of the GlanceImageService when the default of not allowing direct URI transfers is set. 
""" @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_no_data_no_dest_path(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = mock.sentinel.image_chunks ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id) self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) self.assertEqual(mock.sentinel.image_chunks, res) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_data_no_dest_path(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx data = mock.MagicMock() service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, data=data) self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) self.assertIsNone(res) data.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) self.assertFalse(data.close.called) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_no_data_dest_path(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx writer = mock.MagicMock() open_mock.return_value = writer service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertFalse(show_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb') self.assertIsNone(res) writer.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) writer.close.assert_called_once_with() @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_data_dest_path(self, show_mock, open_mock): # NOTE(jaypipes): This really shouldn't be allowed, but because of the # horrible design of the download() method in GlanceImageService, no # error is raised, and the dst_path is ignored... # #TODO(jaypipes): Fix the aforementioned horrible design of # the download() method. client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx data = mock.MagicMock() service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, data=data) self.assertFalse(show_mock.called) self.assertFalse(open_mock.called) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) self.assertIsNone(res) data.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) self.assertFalse(data.close.called) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_data_dest_path_write_fails(self, show_mock, open_mock): client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) # NOTE(mikal): data is a file like object, which in our case always # raises an exception when we attempt to write to the file. 
class FakeDiskException(Exception): pass class Exceptionator(StringIO): def write(self, _): raise FakeDiskException('Disk full!') self.assertRaises(FakeDiskException, service.download, ctx, mock.sentinel.image_id, data=Exceptionator()) @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_direct_file_uri(self, show_mock, get_tran_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') show_mock.return_value = { 'locations': [ { 'url': 'file:///files/image', 'metadata': mock.sentinel.loc_meta } ] } tran_mod = mock.MagicMock() get_tran_mock.return_value = tran_mod client = mock.MagicMock() ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertIsNone(res) self.assertFalse(client.call.called) show_mock.assert_called_once_with(ctx, mock.sentinel.image_id, include_locations=True) get_tran_mock.assert_called_once_with('file') tran_mod.download.assert_called_once_with(ctx, mock.ANY, mock.sentinel.dst_path, mock.sentinel.loc_meta) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_direct_exception_fallback(self, show_mock, get_tran_mock, open_mock): # Test that we fall back to downloading to the dst_path # if the download method of the transfer module raised # an exception. self.flags(allowed_direct_url_schemes=['file'], group='glance') show_mock.return_value = { 'locations': [ { 'url': 'file:///files/image', 'metadata': mock.sentinel.loc_meta } ] } tran_mod = mock.MagicMock() tran_mod.download.side_effect = Exception get_tran_mock.return_value = tran_mod client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx writer = mock.MagicMock() open_mock.return_value = writer service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertIsNone(res) show_mock.assert_called_once_with(ctx, mock.sentinel.image_id, include_locations=True) get_tran_mock.assert_called_once_with('file') tran_mod.download.assert_called_once_with(ctx, mock.ANY, mock.sentinel.dst_path, mock.sentinel.loc_meta) client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) # NOTE(jaypipes): log messages call open() in part of the # download path, so here, we just check that the last open() # call was done for the dst_path file descriptor. open_mock.assert_called_with(mock.sentinel.dst_path, 'wb') self.assertIsNone(res) writer.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_direct_no_mod_fallback(self, show_mock, get_tran_mock, open_mock): # Test that we fall back to downloading to the dst_path # if no appropriate transfer module is found... # an exception. 
self.flags(allowed_direct_url_schemes=['funky'], group='glance') show_mock.return_value = { 'locations': [ { 'url': 'file:///files/image', 'metadata': mock.sentinel.loc_meta } ] } get_tran_mock.return_value = None client = mock.MagicMock() client.call.return_value = [1, 2, 3] ctx = mock.sentinel.ctx writer = mock.MagicMock() open_mock.return_value = writer service = glance.GlanceImageService(client) res = service.download(ctx, mock.sentinel.image_id, dst_path=mock.sentinel.dst_path) self.assertIsNone(res) show_mock.assert_called_once_with(ctx, mock.sentinel.image_id, include_locations=True) get_tran_mock.assert_called_once_with('file') client.call.assert_called_once_with(ctx, 1, 'data', mock.sentinel.image_id) # NOTE(jaypipes): log messages call open() in part of the # download path, so here, we just check that the last open() # call was done for the dst_path file descriptor. open_mock.assert_called_with(mock.sentinel.dst_path, 'wb') self.assertIsNone(res) writer.write.assert_has_calls( [ mock.call(1), mock.call(2), mock.call(3) ] ) writer.close.assert_called_once_with() class TestDownloadSignatureVerification(test.NoDBTestCase): class MockVerifier(object): def update(self, data): return def verify(self): return True class BadVerifier(object): def update(self, data): return def verify(self): raise cryptography.exceptions.InvalidSignature( 'Invalid signature.' ) def setUp(self): super(TestDownloadSignatureVerification, self).setUp() self.flags(verify_glance_signatures=True, group='glance') self.fake_img_props = { 'properties': { 'img_signature': 'signature', 'img_signature_hash_method': 'SHA-224', 'img_signature_certificate_uuid': 'uuid', 'img_signature_key_type': 'RSA-PSS', } } self.fake_img_data = ['A' * 256, 'B' * 256] client = mock.MagicMock() client.call.return_value = self.fake_img_data self.service = glance.GlanceImageService(client) @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_with_signature_verification(self, mock_get_verifier, mock_show, mock_log): mock_get_verifier.return_value = self.MockVerifier() mock_show.return_value = self.fake_img_props res = self.service.download(context=None, image_id=None, data=None, dst_path=None) self.assertEqual(self.fake_img_data, res) mock_get_verifier.assert_called_once_with(None, 'uuid', 'SHA-224', 'signature', 'RSA-PSS') mock_log.info.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_dst_path_signature_verification(self, mock_get_verifier, mock_show, mock_log, mock_open): mock_get_verifier.return_value = self.MockVerifier() mock_show.return_value = self.fake_img_props mock_dest = mock.MagicMock() fake_path = 'FAKE_PATH' mock_open.return_value = mock_dest self.service.download(context=None, image_id=None, data=None, dst_path=fake_path) mock_get_verifier.assert_called_once_with(None, 'uuid', 'SHA-224', 'signature', 'RSA-PSS') mock_log.info.assert_called_once_with(mock.ANY, mock.ANY) self.assertEqual(len(self.fake_img_data), mock_dest.write.call_count) self.assertTrue(mock_dest.close.called) @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_with_get_verifier_failure(self, mock_get_verifier, mock_show, mock_log): 
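# Simulate signature_utils.get_verifier itself failing: download() should
# propagate the SignatureVerificationError and log the error.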
mock_get_verifier.side_effect = exception.SignatureVerificationError( reason='Signature verification ' 'failed.' ) mock_show.return_value = self.fake_img_props self.assertRaises(exception.SignatureVerificationError, self.service.download, context=None, image_id=None, data=None, dst_path=None) mock_log.error.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') @mock.patch('nova.signature_utils.get_verifier') def test_download_with_invalid_signature(self, mock_get_verifier, mock_show, mock_log): mock_get_verifier.return_value = self.BadVerifier() mock_show.return_value = self.fake_img_props self.assertRaises(cryptography.exceptions.InvalidSignature, self.service.download, context=None, image_id=None, data=None, dst_path=None) mock_log.error.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_missing_signature_metadata(self, mock_show, mock_log): mock_show.return_value = {'properties': {}} self.assertRaisesRegex(exception.SignatureVerificationError, 'Required image properties for signature ' 'verification do not exist. Cannot verify ' 'signature. Missing property: .*', self.service.download, context=None, image_id=None, data=None, dst_path=None) @mock.patch.object(six.moves.builtins, 'open') @mock.patch('nova.signature_utils.get_verifier') @mock.patch('nova.image.glance.LOG') @mock.patch('nova.image.glance.GlanceImageService.show') def test_download_dst_path_signature_fail(self, mock_show, mock_log, mock_get_verifier, mock_open): mock_get_verifier.return_value = self.BadVerifier() mock_dest = mock.MagicMock() fake_path = 'FAKE_PATH' mock_open.return_value = mock_dest mock_show.return_value = self.fake_img_props self.assertRaises(cryptography.exceptions.InvalidSignature, self.service.download, context=None, image_id=None, data=None, dst_path=fake_path) mock_log.error.assert_called_once_with(mock.ANY, mock.ANY) mock_open.assert_called_once_with(fake_path, 'wb') mock_dest.truncate.assert_called_once_with(0) self.assertTrue(mock_dest.close.called) class TestIsImageAvailable(test.NoDBTestCase): """Tests the internal _is_image_available function.""" class ImageSpecV2(object): visibility = None properties = None class ImageSpecV1(object): is_public = None properties = None def test_auth_token_override(self): ctx = mock.MagicMock(auth_token=True) img = mock.MagicMock() res = glance._is_image_available(ctx, img) self.assertTrue(res) self.assertFalse(img.called) def test_admin_override(self): ctx = mock.MagicMock(auth_token=False, is_admin=True) img = mock.MagicMock() res = glance._is_image_available(ctx, img) self.assertTrue(res) self.assertFalse(img.called) def test_v2_visibility(self): ctx = mock.MagicMock(auth_token=False, is_admin=False) # We emulate warlock validation that throws an AttributeError # if you try to call is_public on an image model returned by # a call to V2 image.get(). Here, the ImageSpecV2 does not have # an is_public attribute and MagicMock will throw an AttributeError. 
img = mock.MagicMock(visibility='PUBLIC', spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_v1_is_public(self): ctx = mock.MagicMock(auth_token=False, is_admin=False) img = mock.MagicMock(is_public=True, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_project_is_owner(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, project_id='123') props = { 'owner_id': '123' } img = mock.MagicMock(visibility='private', properties=props, spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_project_context_matches_project_prop(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, project_id='123') props = { 'project_id': '123' } img = mock.MagicMock(visibility='private', properties=props, spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) def test_no_user_in_props(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, project_id='123') props = { } img = mock.MagicMock(visibility='private', properties=props, spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertFalse(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertFalse(res) def test_user_matches_context(self): ctx = mock.MagicMock(auth_token=False, is_admin=False, user_id='123') props = { 'user_id': '123' } img = mock.MagicMock(visibility='private', properties=props, spec=TestIsImageAvailable.ImageSpecV2) res = glance._is_image_available(ctx, img) self.assertTrue(res) ctx.reset_mock() img = mock.MagicMock(is_public=False, properties=props, spec=TestIsImageAvailable.ImageSpecV1) res = glance._is_image_available(ctx, img) self.assertTrue(res) class TestShow(test.NoDBTestCase): """Tests the show method of the GlanceImageService.""" @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_show_success(self, is_avail_mock, trans_from_mock): is_avail_mock.return_value = True trans_from_mock.return_value = {'mock': mock.sentinel.trans_from} client = mock.MagicMock() client.call.return_value = {} ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) info = service.show(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) is_avail_mock.assert_called_once_with(ctx, {}) trans_from_mock.assert_called_once_with({}, include_locations=False) self.assertIn('mock', info) self.assertEqual(mock.sentinel.trans_from, info['mock']) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_show_not_available(self, is_avail_mock, trans_from_mock): is_avail_mock.return_value = False client = mock.MagicMock() client.call.return_value = mock.sentinel.images_0 ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.ImageNotFound): service.show(ctx, mock.sentinel.image_id) 
client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) self.assertFalse(trans_from_mock.called) @mock.patch('nova.image.glance._reraise_translated_image_exception') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_show_client_failure(self, is_avail_mock, trans_from_mock, reraise_mock): raised = exception.ImageNotAuthorized(image_id=123) client = mock.MagicMock() client.call.side_effect = glanceclient.exc.Forbidden ctx = mock.sentinel.ctx reraise_mock.side_effect = raised service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.ImageNotAuthorized): service.show(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with(mock.sentinel.image_id) @mock.patch('nova.image.glance._is_image_available') def test_show_queued_image_without_some_attrs(self, is_avail_mock): is_avail_mock.return_value = True client = mock.MagicMock() # fake image cls without disk_format, container_format, name attributes class fake_image_cls(dict): id = 'b31aa5dd-f07a-4748-8f15-398346887584' deleted = False protected = False min_disk = 0 created_at = '2014-05-20T08:16:48' size = 0 status = 'queued' is_public = False min_ram = 0 owner = '980ec4870033453ead65c0470a78b8a8' updated_at = '2014-05-20T08:16:48' glance_image = fake_image_cls() client.call.return_value = glance_image ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) image_info = service.show(ctx, glance_image.id) client.call.assert_called_once_with(ctx, 1, 'get', glance_image.id) NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner', 'container_format', 'status', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'checksum', 'min_disk', 'min_ram', 'is_public', 'properties']) self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys())) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_include_locations_success(self, avail_mock, trans_from_mock): locations = [mock.sentinel.loc1] avail_mock.return_value = True trans_from_mock.return_value = {'locations': locations} client = mock.Mock() client.call.return_value = mock.sentinel.image service = glance.GlanceImageService(client) ctx = mock.sentinel.ctx image_id = mock.sentinel.image_id info = service.show(ctx, image_id, include_locations=True) client.call.assert_called_once_with(ctx, 2, 'get', image_id) avail_mock.assert_called_once_with(ctx, mock.sentinel.image) trans_from_mock.assert_called_once_with(mock.sentinel.image, include_locations=True) self.assertIn('locations', info) self.assertEqual(locations, info['locations']) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_include_direct_uri_success(self, avail_mock, trans_from_mock): locations = [mock.sentinel.loc1] avail_mock.return_value = True trans_from_mock.return_value = {'locations': locations, 'direct_uri': mock.sentinel.duri} client = mock.Mock() client.call.return_value = mock.sentinel.image service = glance.GlanceImageService(client) ctx = mock.sentinel.ctx image_id = mock.sentinel.image_id info = service.show(ctx, image_id, include_locations=True) client.call.assert_called_once_with(ctx, 2, 'get', image_id) expected = locations 
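# The direct_uri reported by Glance is expected to be folded into the
# translated locations list as one extra entry with empty metadata.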
expected.append({'url': mock.sentinel.duri, 'metadata': {}}) self.assertIn('locations', info) self.assertEqual(expected, info['locations']) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_do_not_show_deleted_images(self, is_avail_mock, trans_from_mock): class fake_image_cls(dict): id = 'b31aa5dd-f07a-4748-8f15-398346887584' deleted = True glance_image = fake_image_cls() client = mock.MagicMock() client.call.return_value = glance_image ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.ImageNotFound): service.show(ctx, glance_image.id, show_deleted=False) client.call.assert_called_once_with(ctx, 1, 'get', glance_image.id) self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) class TestDetail(test.NoDBTestCase): """Tests the detail method of the GlanceImageService.""" @mock.patch('nova.image.glance._extract_query_params') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_success_available(self, is_avail_mock, trans_from_mock, ext_query_mock): params = {} is_avail_mock.return_value = True ext_query_mock.return_value = params trans_from_mock.return_value = mock.sentinel.trans_from client = mock.MagicMock() client.call.return_value = [mock.sentinel.images_0] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) images = service.detail(ctx, **params) client.call.assert_called_once_with(ctx, 1, 'list') is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) trans_from_mock.assert_called_once_with(mock.sentinel.images_0) self.assertEqual([mock.sentinel.trans_from], images) @mock.patch('nova.image.glance._extract_query_params') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock, ext_query_mock): params = {} is_avail_mock.return_value = False ext_query_mock.return_value = params trans_from_mock.return_value = mock.sentinel.trans_from client = mock.MagicMock() client.call.return_value = [mock.sentinel.images_0] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) images = service.detail(ctx, **params) client.call.assert_called_once_with(ctx, 1, 'list') is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) self.assertFalse(trans_from_mock.called) self.assertEqual([], images) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_params_passed(self, is_avail_mock, _trans_from_mock): client = mock.MagicMock() client.call.return_value = [mock.sentinel.images_0] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) service.detail(ctx, page_size=5, limit=10) expected_filters = { 'is_public': 'none' } client.call.assert_called_once_with(ctx, 1, 'list', filters=expected_filters, page_size=5, limit=10) @mock.patch('nova.image.glance._reraise_translated_exception') @mock.patch('nova.image.glance._extract_query_params') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') def test_detail_client_failure(self, is_avail_mock, trans_from_mock, ext_query_mock, reraise_mock): params = {} ext_query_mock.return_value = params raised = exception.Forbidden() client = mock.MagicMock() client.call.side_effect = glanceclient.exc.Forbidden ctx = mock.sentinel.ctx 
reraise_mock.side_effect = raised service = glance.GlanceImageService(client) with testtools.ExpectedException(exception.Forbidden): service.detail(ctx, **params) client.call.assert_called_once_with(ctx, 1, 'list') self.assertFalse(is_avail_mock.called) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with() class TestCreate(test.NoDBTestCase): """Tests the create method of the GlanceImageService.""" @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_create_success(self, trans_to_mock, trans_from_mock): translated = { 'image_id': mock.sentinel.image_id } trans_to_mock.return_value = translated trans_from_mock.return_value = mock.sentinel.trans_from image_mock = mock.MagicMock(spec=dict) client = mock.MagicMock() client.call.return_value = mock.sentinel.image_meta ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) image_meta = service.create(ctx, image_mock) trans_to_mock.assert_called_once_with(image_mock) client.call.assert_called_once_with(ctx, 1, 'create', image_id=mock.sentinel.image_id) trans_from_mock.assert_called_once_with(mock.sentinel.image_meta) self.assertEqual(mock.sentinel.trans_from, image_meta) # Now verify that if we supply image data to the call, # that the client is also called with the data kwarg client.reset_mock() service.create(ctx, image_mock, data=mock.sentinel.data) client.call.assert_called_once_with(ctx, 1, 'create', image_id=mock.sentinel.image_id, data=mock.sentinel.data) @mock.patch('nova.image.glance._reraise_translated_exception') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_create_client_failure(self, trans_to_mock, trans_from_mock, reraise_mock): translated = {} trans_to_mock.return_value = translated image_mock = mock.MagicMock(spec=dict) raised = exception.Invalid() client = mock.MagicMock() client.call.side_effect = glanceclient.exc.BadRequest ctx = mock.sentinel.ctx reraise_mock.side_effect = raised service = glance.GlanceImageService(client) self.assertRaises(exception.Invalid, service.create, ctx, image_mock) trans_to_mock.assert_called_once_with(image_mock) self.assertFalse(trans_from_mock.called) class TestUpdate(test.NoDBTestCase): """Tests the update method of the GlanceImageService.""" @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_update_success(self, trans_to_mock, trans_from_mock): translated = { 'id': mock.sentinel.image_id, 'name': mock.sentinel.name } trans_to_mock.return_value = translated trans_from_mock.return_value = mock.sentinel.trans_from image_mock = mock.MagicMock(spec=dict) client = mock.MagicMock() client.call.return_value = mock.sentinel.image_meta ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) image_meta = service.update(ctx, mock.sentinel.image_id, image_mock) trans_to_mock.assert_called_once_with(image_mock) # Verify that the 'id' element has been removed as a kwarg to # the call to glanceclient's update (since the image ID is # supplied as a positional arg), and that the # purge_props default is True. 
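# Roughly: a translated dict of {'id': <image_id>, 'name': <name>}
# should surface as
#     client.call(ctx, 1, 'update', <image_id>,
#                 name=<name>, purge_props=True)
# with 'id' promoted to the positional argument.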
client.call.assert_called_once_with(ctx, 1, 'update', mock.sentinel.image_id, name=mock.sentinel.name, purge_props=True) trans_from_mock.assert_called_once_with(mock.sentinel.image_meta) self.assertEqual(mock.sentinel.trans_from, image_meta) # Now verify that if we supply image data to the call, # that the client is also called with the data kwarg client.reset_mock() service.update(ctx, mock.sentinel.image_id, image_mock, data=mock.sentinel.data) client.call.assert_called_once_with(ctx, 1, 'update', mock.sentinel.image_id, name=mock.sentinel.name, purge_props=True, data=mock.sentinel.data) @mock.patch('nova.image.glance._reraise_translated_image_exception') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._translate_to_glance') def test_update_client_failure(self, trans_to_mock, trans_from_mock, reraise_mock): translated = { 'name': mock.sentinel.name } trans_to_mock.return_value = translated trans_from_mock.return_value = mock.sentinel.trans_from image_mock = mock.MagicMock(spec=dict) raised = exception.ImageNotAuthorized(image_id=123) client = mock.MagicMock() client.call.side_effect = glanceclient.exc.Forbidden ctx = mock.sentinel.ctx reraise_mock.side_effect = raised service = glance.GlanceImageService(client) self.assertRaises(exception.ImageNotAuthorized, service.update, ctx, mock.sentinel.image_id, image_mock) client.call.assert_called_once_with(ctx, 1, 'update', mock.sentinel.image_id, purge_props=True, name=mock.sentinel.name) self.assertFalse(trans_from_mock.called) reraise_mock.assert_called_once_with(mock.sentinel.image_id) class TestDelete(test.NoDBTestCase): """Tests the delete method of the GlanceImageService.""" def test_delete_success(self): client = mock.MagicMock() client.call.return_value = True ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) service.delete(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'delete', mock.sentinel.image_id) def test_delete_client_failure(self): client = mock.MagicMock() client.call.side_effect = glanceclient.exc.NotFound ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) self.assertRaises(exception.ImageNotFound, service.delete, ctx, mock.sentinel.image_id) class TestGlanceUrl(test.NoDBTestCase): def test_generate_glance_http_url(self): generated_url = glance.generate_glance_url() glance_host = CONF.glance.host # ipv6 address, need to wrap it with '[]' if netutils.is_valid_ipv6(glance_host): glance_host = '[%s]' % glance_host http_url = "http://%s:%d" % (glance_host, CONF.glance.port) self.assertEqual(generated_url, http_url) def test_generate_glance_https_url(self): self.flags(protocol="https", group='glance') generated_url = glance.generate_glance_url() glance_host = CONF.glance.host # ipv6 address, need to wrap it with '[]' if netutils.is_valid_ipv6(glance_host): glance_host = '[%s]' % glance_host https_url = "https://%s:%d" % (glance_host, CONF.glance.port) self.assertEqual(generated_url, https_url) class TestGlanceApiServers(test.NoDBTestCase): def test_get_api_servers(self): glance_servers = ['10.0.1.1:9292', 'https://10.0.0.1:9293', 'http://10.0.2.2:9294'] expected_servers = ['http://10.0.1.1:9292', 'https://10.0.0.1:9293', 'http://10.0.2.2:9294'] self.flags(api_servers=glance_servers, group='glance') api_servers = glance.get_api_servers() i = 0 for server in api_servers: i += 1 self.assertIn(server, expected_servers) if i > 2: break class TestGlanceNoApiServers(test.NoDBTestCase): def test_get_api_server_no_server(self): 
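# With no api_servers list configured, get_api_servers() appears to
# fall back to a single endpoint built from the glance host, port and
# protocol options, bracketing IPv6 literals as exercised below.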
self.flags(group='glance', host="10.0.0.1", port=9292) api_servers = glance.get_api_servers() self.assertEqual("http://10.0.0.1:9292", next(api_servers)) self.flags(group='glance', host="10.0.0.1", protocol="https", port=9292) api_servers = glance.get_api_servers() self.assertEqual("https://10.0.0.1:9292", next(api_servers)) self.flags(group='glance', host="f000::c0de", protocol="https", port=9292) api_servers = glance.get_api_servers() self.assertEqual("https://[f000::c0de]:9292", next(api_servers)) class TestUpdateGlanceImage(test.NoDBTestCase): @mock.patch('nova.image.glance.GlanceImageService') def test_start(self, mock_glance_image_service): consumer = glance.UpdateGlanceImage( 'context', 'id', 'metadata', 'stream') with mock.patch.object(glance, 'get_remote_image_service') as a_mock: a_mock.return_value = (mock_glance_image_service, 'image_id') consumer.start() mock_glance_image_service.update.assert_called_with( 'context', 'image_id', 'metadata', 'stream', purge_props=False) nova-13.1.4/nova/tests/unit/image/__init__.py0000664000567000056710000000000013064447140022154 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/image/test_transfer_modules.py0000664000567000056710000000723213064447152025051 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six.moves.urllib.parse as urlparse from nova import exception from nova.image.download import file as tm_file from nova import test class TestFileTransferModule(test.NoDBTestCase): @mock.patch('nova.virt.libvirt.utils.copy_image') def test_filesystem_success(self, copy_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) mountpoint = '/gluster' url = 'file:///gluster/my/image/path' url_parts = urlparse.urlparse(url) fs_id = 'someid' loc_meta = { 'id': fs_id, 'mountpoint': mountpoint } dst_file = mock.MagicMock() tm = tm_file.FileTransfer() # NOTE(Jbresnah) The following options must be added after the module # has added the specific groups. 
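# That is, the dynamic option group used below ('image_file_url:gluster')
# only exists once the transfer module has registered its filesystem
# groups (FileTransfer() above), so setting these flags any earlier
# would likely fail with a no-such-group error.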
self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) tm.download(mock.sentinel.ctx, url_parts, dst_file, loc_meta) copy_mock.assert_called_once_with('/gluster/my/image/path', dst_file) @mock.patch('nova.virt.libvirt.utils.copy_image') def test_filesystem_mismatched_mountpoint(self, copy_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) mountpoint = '/gluster' # Should include the mountpoint before my/image/path url = 'file:///my/image/path' url_parts = urlparse.urlparse(url) fs_id = 'someid' loc_meta = { 'id': fs_id, 'mountpoint': mountpoint } dst_file = mock.MagicMock() tm = tm_file.FileTransfer() self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) self.assertRaises(exception.ImageDownloadModuleMetaDataError, tm.download, mock.sentinel.ctx, url_parts, dst_file, loc_meta) self.assertFalse(copy_mock.called) @mock.patch('nova.virt.libvirt.utils.copy_image') def test_filesystem_mismatched_filesystem(self, copy_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) mountpoint = '/gluster' # Should include the mountpoint before my/image/path url = 'file:///my/image/path' url_parts = urlparse.urlparse(url) fs_id = 'someid' loc_meta = { 'id': 'funky', 'mountpoint': mountpoint } dst_file = mock.MagicMock() tm = tm_file.FileTransfer() self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) self.assertRaises(exception.ImageDownloadModuleError, tm.download, mock.sentinel.ctx, url_parts, dst_file, loc_meta) self.assertFalse(copy_mock.called) nova-13.1.4/nova/tests/unit/image/fake.py0000664000567000056710000002261013064447152021341 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake image service.""" import copy import datetime import uuid from oslo_config import cfg from oslo_log import log as logging from nova.compute import arch from nova import exception CONF = cfg.CONF CONF.import_opt('null_kernel', 'nova.compute.api') LOG = logging.getLogger(__name__) AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID = '70a599e0-31e7-49b7-b260-868f441e862b' class _FakeImageService(object): """Mock (fake) image service for unit testing.""" def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
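# The fixture therefore pre-seeds a handful of images with fixed UUIDs
# and varied container/disk formats (raw, ami, bare, ova/vhd) so unit
# tests can rely on predictable image records without contacting Glance.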
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'raw', 'disk_format': 'raw', 'size': '25165824', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel, 'architecture': arch.X86_64}} image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': '58145823', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel}} image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'bare', 'disk_format': 'raw', 'size': '83594576', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel}} image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': '84035174', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel}} image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': '26360814', 'properties': {'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'ramdisk_id': None}} image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', 'name': 'fakeimage6', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'size': '49163826', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel, 'architecture': arch.X86_64, 'auto_disk_config': 'False'}} image7 = {'id': AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID, 'name': 'fakeimage7', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'size': '74185822', 'properties': {'kernel_id': CONF.null_kernel, 'ramdisk_id': CONF.null_kernel, 'architecture': arch.X86_64, 'auto_disk_config': 'True'}} self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) self.create(None, image5) self.create(None, image6) self.create(None, image7) self._imagedata = {} super(_FakeImageService, self).__init__() # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) def download(self, context, image_id, dst_path=None, data=None): self.show(context, image_id) if data: data.write(self._imagedata.get(image_id, '')) elif dst_path: with open(dst_path, 'wb') as data: data.write(self._imagedata.get(image_id, '')) def show(self, context, image_id, include_locations=False, show_deleted=True): """Get data about specified image. 
Returns a dict containing image data for the given opaque image id. """ image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warning('Unable to find image id %s. Have images: %s', image_id, self.images) raise exception.ImageNotFound(image_id=image_id) def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ image_id = str(metadata.get('id', uuid.uuid4())) metadata['id'] = image_id if image_id in self.images: raise exception.CouldNotUploadImage(image_id=image_id) self.images[image_id] = copy.deepcopy(metadata) if data: self._imagedata[image_id] = data.read() return self.images[image_id] def update(self, context, image_id, metadata, data=None, purge_props=False): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) if purge_props: self.images[image_id] = copy.deepcopy(metadata) else: image = self.images[image_id] try: image['properties'].update(metadata.pop('properties')) except KeyError: pass image.update(metadata) return self.images[image_id] def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. """ removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) def get_location(self, context, image_id): if image_id in self.images: return 'fake_location' return None _fakeImageService = _FakeImageService() def FakeImageService(): return _fakeImageService def FakeImageService_reset(): global _fakeImageService _fakeImageService = _FakeImageService() def get_valid_image_id(): return AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID def stub_out_image_service(test): """Stubs out the image service for the test with the FakeImageService :param test: instance of nova.test.TestCase :returns: The stubbed out FakeImageService object """ image_service = FakeImageService() test.stub_out('nova.image.glance.get_remote_image_service', lambda x, y: (image_service, y)) test.stub_out('nova.image.glance.get_default_image_service', lambda: image_service) return image_service nova-13.1.4/nova/tests/unit/fake_network.py0000664000567000056710000004057613064447152022043 0ustar jenkinsjenkins00000000000000# Copyright 2011 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
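# This module fakes out nova-network pieces for unit tests: a
# FakeNetworkManager backed by an in-memory FakeDB, canned
# network/VIF/fixed-IP/floating-IP dictionaries, and stub_* helpers
# that patch cached network info into compute API results without
# requiring real database entries.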
from oslo_config import cfg from oslo_serialization import jsonutils from six.moves import range from nova.compute import api as compute_api from nova.compute import manager as compute_manager import nova.context from nova import db from nova import exception from nova.network import manager as network_manager from nova.network import model as network_model from nova.network import rpcapi as network_rpcapi from nova import objects from nova.objects import base as obj_base from nova.objects import network as network_obj from nova.objects import virtual_interface as vif_obj from nova.tests.unit.objects import test_fixed_ip from nova.tests.unit.objects import test_instance_info_cache from nova.tests.unit.objects import test_pci_device from nova.tests import uuidsentinel as uuids HOST = "testhost" CONF = cfg.CONF CONF.import_opt('use_ipv6', 'nova.netconf') class FakeModel(dict): """Represent a model from the db.""" def __init__(self, *args, **kwargs): self.update(kwargs) class FakeNetworkManager(network_manager.NetworkManager): """This NetworkManager doesn't call the base class so we can bypass all inherited service cruft and just perform unit tests. """ class FakeDB(object): vifs = [{'id': 0, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'instance_uuid': uuids.instance_1, 'network_id': 1, 'uuid': uuids.vifs_1, 'address': 'DC:AD:BE:FF:EF:01'}, {'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'instance_uuid': uuids.instance_2, 'network_id': 21, 'uuid': uuids.vifs_2, 'address': 'DC:AD:BE:FF:EF:02'}, {'id': 2, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'instance_uuid': uuids.instance_1, 'network_id': 31, 'uuid': uuids.vifs_3, 'address': 'DC:AD:BE:FF:EF:03'}] floating_ips = [dict(address='172.16.1.1', fixed_ip_id=100), dict(address='172.16.1.2', fixed_ip_id=200), dict(address='173.16.1.2', fixed_ip_id=210)] fixed_ips = [dict(test_fixed_ip.fake_fixed_ip, id=100, address='172.16.0.1', virtual_interface_id=0), dict(test_fixed_ip.fake_fixed_ip, id=200, address='172.16.0.2', virtual_interface_id=1), dict(test_fixed_ip.fake_fixed_ip, id=210, address='173.16.0.2', virtual_interface_id=2)] def fixed_ip_get_by_instance(self, context, instance_uuid): return [dict(address='10.0.0.0'), dict(address='10.0.0.1'), dict(address='10.0.0.2')] def network_get_by_cidr(self, context, cidr): raise exception.NetworkNotFoundForCidr(cidr=cidr) def network_create_safe(self, context, net): fakenet = dict(net) fakenet['id'] = 999 return fakenet def network_get(self, context, network_id, project_only="allow_none"): return {'cidr_v6': '2001:db8:69:%x::/64' % network_id} def network_get_by_uuid(self, context, network_uuid): raise exception.NetworkNotFoundForUUID(uuid=network_uuid) def network_get_all(self, context): raise exception.NoNetworksFound() def network_get_all_by_uuids(self, context, project_only="allow_none"): raise exception.NoNetworksFound() def network_disassociate(self, context, network_id): return True def virtual_interface_get_all(self, context): return self.vifs def fixed_ips_by_virtual_interface(self, context, vif_id): return [ip for ip in self.fixed_ips if ip['virtual_interface_id'] == vif_id] def fixed_ip_disassociate(self, context, address): return True def __init__(self, stubs=None): self.db = self.FakeDB() if stubs: stubs.Set(vif_obj, 'db', self.db) self.deallocate_called = None self.deallocate_fixed_ip_calls = [] self.network_rpcapi = network_rpcapi.NetworkAPI() # TODO(matelakat) method signature should align 
with the faked one's def deallocate_fixed_ip(self, context, address=None, host=None, instance=None): self.deallocate_fixed_ip_calls.append((context, address, host)) # TODO(matelakat) use the deallocate_fixed_ip_calls instead self.deallocate_called = address def _create_fixed_ips(self, context, network_id, fixed_cidr=None, extra_reserved=None, bottom_reserved=0, top_reserved=0): pass def get_instance_nw_info(context, instance_id, rxtx_factor, host, instance_uuid=None, **kwargs): pass def fake_network(network_id, ipv6=None): if ipv6 is None: ipv6 = CONF.use_ipv6 fake_network = {'id': network_id, 'uuid': getattr(uuids, 'network%i' % network_id), 'label': 'test%d' % network_id, 'injected': False, 'multi_host': False, 'cidr': '192.168.%d.0/24' % network_id, 'cidr_v6': None, 'netmask': '255.255.255.0', 'netmask_v6': None, 'bridge': 'fake_br%d' % network_id, 'bridge_interface': 'fake_eth%d' % network_id, 'gateway': '192.168.%d.1' % network_id, 'gateway_v6': None, 'broadcast': '192.168.%d.255' % network_id, 'dns1': '192.168.%d.3' % network_id, 'dns2': '192.168.%d.4' % network_id, 'dns3': '192.168.%d.3' % network_id, 'vlan': None, 'host': None, 'project_id': uuids.project, 'vpn_public_address': '192.168.%d.2' % network_id, 'vpn_public_port': None, 'vpn_private_address': None, 'dhcp_start': None, 'rxtx_base': network_id * 10, 'priority': None, 'deleted': False, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'mtu': None, 'dhcp_server': '192.168.%d.1' % network_id, 'enable_dhcp': True, 'share_address': False} if ipv6: fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id fake_network['netmask_v6'] = '64' if CONF.flat_injected: fake_network['injected'] = True return fake_network def fake_network_obj(context, network_id=1, ipv6=None): return network_obj.Network._from_db_object( context, network_obj.Network(), fake_network(network_id, ipv6)) def fake_vif(x): return{'id': x, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:%02x' % x, 'uuid': getattr(uuids, 'vif%i' % x), 'network_id': x, 'instance_uuid': uuids.vifs_1} def floating_ip_ids(): for i in range(1, 100): yield i def fixed_ip_ids(): for i in range(1, 100): yield i floating_ip_id = floating_ip_ids() fixed_ip_id = fixed_ip_ids() def next_fixed_ip(network_id, num_floating_ips=0): next_id = next(fixed_ip_id) f_ips = [FakeModel(**next_floating_ip(next_id)) for i in range(num_floating_ips)] return {'id': next_id, 'network_id': network_id, 'address': '192.168.%d.%03d' % (network_id, (next_id + 99)), 'instance_uuid': uuids.fixed_ip, 'allocated': False, 'reserved': False, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'leased': True, 'host': HOST, 'deleted': 0, 'network': fake_network(network_id), 'virtual_interface': fake_vif(network_id), # and since network_id and vif_id happen to be equivalent 'virtual_interface_id': network_id, 'floating_ips': f_ips} def next_floating_ip(fixed_ip_id): next_id = next(floating_ip_id) return {'id': next_id, 'address': '10.10.10.%03d' % (next_id + 99), 'fixed_ip_id': fixed_ip_id, 'project_id': None, 'auto_assigned': False} def ipv4_like(ip, match_string): ip = ip.split('.') match_octets = match_string.split('.') for i, octet in enumerate(match_octets): if octet == '*': continue if octet != ip[i]: return False return True def fake_get_instance_nw_info(test, num_networks=1, ips_per_vif=2, floating_ips_per_fixed_ip=0): # test is an instance of nova.test.TestCase # ips_per_vif is the 
number of ips each vif will have # num_floating_ips is number of float ips for each fixed ip network = network_manager.FlatManager(host=HOST) network.db = db # reset the fixed and floating ip generators global floating_ip_id, fixed_ip_id, fixed_ips floating_ip_id = floating_ip_ids() fixed_ip_id = fixed_ip_ids() fixed_ips = [] def fixed_ips_fake(*args, **kwargs): global fixed_ips ips = [next_fixed_ip(i, floating_ips_per_fixed_ip) for i in range(1, num_networks + 1) for j in range(ips_per_vif)] fixed_ips = ips return ips def update_cache_fake(*args, **kwargs): fake_info_cache = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'instance_uuid': uuids.vifs_1, 'network_info': '[]', } return fake_info_cache test.stub_out('nova.db.fixed_ip_get_by_instance', fixed_ips_fake) test.stub_out('nova.db.instance_info_cache_update', update_cache_fake) class FakeContext(nova.context.RequestContext): def is_admin(self): return True nw_model = network.get_instance_nw_info( FakeContext('fakeuser', 'fake_project'), 0, 3, None) return nw_model def stub_out_nw_api_get_instance_nw_info(test, func=None, num_networks=1, ips_per_vif=1, floating_ips_per_fixed_ip=0): def get_instance_nw_info(self, context, instance, conductor_api=None): return fake_get_instance_nw_info(test, num_networks=num_networks, ips_per_vif=ips_per_vif, floating_ips_per_fixed_ip=floating_ips_per_fixed_ip) if func is None: func = get_instance_nw_info test.stub_out('nova.network.api.API.get_instance_nw_info', func) def stub_out_network_cleanup(test): test.stub_out('nova.network.api.API.deallocate_for_instance', lambda *args, **kwargs: None) _real_functions = {} def set_stub_network_methods(test): global _real_functions cm = compute_manager.ComputeManager if not _real_functions: _real_functions = { '_allocate_network': cm._allocate_network, '_deallocate_network': cm._deallocate_network} def fake_networkinfo(*args, **kwargs): return network_model.NetworkInfo() def fake_async_networkinfo(*args, **kwargs): return network_model.NetworkInfoAsyncWrapper(fake_networkinfo) test.stub_out('nova.compute.manager.ComputeManager._allocate_network', fake_async_networkinfo) test.stub_out('nova.compute.manager.ComputeManager._deallocate_network', lambda *args, **kwargs: None) def unset_stub_network_methods(test): global _real_functions if _real_functions: for name in _real_functions: test.stub_out('nova.compute.manager.ComputeManager.' 
+ name, _real_functions[name]) def stub_compute_with_ips(stubs): orig_get = compute_api.API.get orig_get_all = compute_api.API.get_all orig_create = compute_api.API.create def fake_get(*args, **kwargs): return _get_instances_with_cached_ips(orig_get, *args, **kwargs) def fake_get_all(*args, **kwargs): return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs) def fake_create(*args, **kwargs): return _create_instances_with_cached_ips(orig_create, *args, **kwargs) def fake_pci_device_get_by_addr(context, node_id, dev_addr): return test_pci_device.fake_db_dev stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr) stubs.Set(compute_api.API, 'get', fake_get) stubs.Set(compute_api.API, 'get_all', fake_get_all) stubs.Set(compute_api.API, 'create', fake_create) def _get_fake_cache(): def _ip(ip, fixed=True, floats=None): ip_dict = {'address': ip, 'type': 'fixed'} if not fixed: ip_dict['type'] = 'floating' if fixed and floats: ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats] return ip_dict info = [{'address': 'aa:bb:cc:dd:ee:ff', 'id': 1, 'network': {'bridge': 'br0', 'id': 1, 'label': 'private', 'subnets': [{'cidr': '192.168.0.0/24', 'ips': [_ip('192.168.0.3')]}]}}] if CONF.use_ipv6: ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff' info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64', 'ips': [_ip(ipv6_addr)]}) return jsonutils.dumps(info) def _get_instances_with_cached_ips(orig_func, *args, **kwargs): """Kludge the cache into instance(s) without having to create DB entries """ instances = orig_func(*args, **kwargs) context = args[0] fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a') def _info_cache_for(instance): info_cache = dict(test_instance_info_cache.fake_info_cache, network_info=_get_fake_cache(), instance_uuid=instance['uuid']) if isinstance(instance, obj_base.NovaObject): _info_cache = objects.InstanceInfoCache(context) objects.InstanceInfoCache._from_db_object(context, _info_cache, info_cache) info_cache = _info_cache instance['info_cache'] = info_cache if isinstance(instances, (list, obj_base.ObjectListBase)): for instance in instances: _info_cache_for(instance) fake_device.claim(instance.uuid) fake_device.allocate(instance) else: _info_cache_for(instances) fake_device.claim(instances.uuid) fake_device.allocate(instances) return instances def _create_instances_with_cached_ips(orig_func, *args, **kwargs): """Kludge the above kludge so that the database doesn't get out of sync with the actual instance. """ instances, reservation_id = orig_func(*args, **kwargs) fake_cache = _get_fake_cache() for instance in instances: instance['info_cache']['network_info'] = fake_cache db.instance_info_cache_update(args[1], instance['uuid'], {'network_info': fake_cache}) return (instances, reservation_id) nova-13.1.4/nova/tests/unit/test_fixtures.py0000664000567000056710000003635513064447152022274 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy import sys import fixtures as fx import mock from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import testtools from nova.db.sqlalchemy import api as session from nova import exception from nova.objects import base as obj_base from nova.tests import fixtures from nova.tests.unit import conf_fixture from nova import utils CONF = cfg.CONF class TestConfFixture(testtools.TestCase): """Test the Conf fixtures in Nova. This is a basic test that this fixture works like we expect. Expectations: 1. before using the fixture, a default value (api_paste_config) comes through untouched. 2. before using the fixture, a known default value that we override is correct. 3. after using the fixture a known value that we override is the new value. 4. after using the fixture we can set a default value to something random, and it will be reset once we are done. There are 2 copies of this test so that you can verify they do the right thing with: tox -e py27 test_fixtures -- --concurrency=1 As regardless of run order, their initial asserts would be impacted if the reset behavior isn't working correctly. """ def _test_override(self): self.assertEqual('api-paste.ini', CONF.api_paste_config) self.assertFalse(CONF.fake_network) self.useFixture(conf_fixture.ConfFixture()) CONF.set_default('api_paste_config', 'foo') self.assertTrue(CONF.fake_network) def test_override1(self): self._test_override() def test_override2(self): self._test_override() class TestOutputStream(testtools.TestCase): """Ensure Output Stream capture works as expected. This has the added benefit of providing a code example of how you can manipulate the output stream in your own tests. 
""" def test_output(self): self.useFixture(fx.EnvironmentVariable('OS_STDOUT_CAPTURE', '1')) self.useFixture(fx.EnvironmentVariable('OS_STDERR_CAPTURE', '1')) out = self.useFixture(fixtures.OutputStreamCapture()) sys.stdout.write("foo") sys.stderr.write("bar") self.assertEqual("foo", out.stdout) self.assertEqual("bar", out.stderr) # TODO(sdague): nuke the out and err buffers so it doesn't # make it to testr class TestLogging(testtools.TestCase): def test_default_logging(self): stdlog = self.useFixture(fixtures.StandardLogging()) root = logging.getLogger() # there should be a null handler as well at DEBUG self.assertEqual(2, len(root.handlers), root.handlers) log = logging.getLogger(__name__) log.info("at info") log.debug("at debug") self.assertIn("at info", stdlog.logger.output) self.assertNotIn("at debug", stdlog.logger.output) # broken debug messages should still explode, even though we # aren't logging them in the regular handler self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo") # and, ensure that one of the terrible log messages isn't # output at info warn_log = logging.getLogger('migrate.versioning.api') warn_log.info("warn_log at info, should be skipped") warn_log.error("warn_log at error") self.assertIn("warn_log at error", stdlog.logger.output) self.assertNotIn("warn_log at info", stdlog.logger.output) def test_debug_logging(self): self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1')) stdlog = self.useFixture(fixtures.StandardLogging()) root = logging.getLogger() # there should no longer be a null handler self.assertEqual(1, len(root.handlers), root.handlers) log = logging.getLogger(__name__) log.info("at info") log.debug("at debug") self.assertIn("at info", stdlog.logger.output) self.assertIn("at debug", stdlog.logger.output) class TestTimeout(testtools.TestCase): """Tests for our timeout fixture. Testing the actual timeout mechanism is beyond the scope of this test, because it's a pretty clear pass through to fixtures' timeout fixture, which tested in their tree. """ def test_scaling(self): # a bad scaling factor self.assertRaises(ValueError, fixtures.Timeout, 1, 0.5) # various things that should work. timeout = fixtures.Timeout(10) self.assertEqual(10, timeout.test_timeout) timeout = fixtures.Timeout("10") self.assertEqual(10, timeout.test_timeout) timeout = fixtures.Timeout("10", 2) self.assertEqual(20, timeout.test_timeout) class TestOSAPIFixture(testtools.TestCase): @mock.patch('nova.objects.Service.get_by_host_and_binary') @mock.patch('nova.objects.Service.create') def test_responds_to_version(self, mock_service_create, mock_get): """Ensure the OSAPI server responds to calls sensibly.""" self.useFixture(fixtures.OutputStreamCapture()) self.useFixture(fixtures.StandardLogging()) self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.RPCFixture('nova.test')) api = self.useFixture(fixtures.OSAPIFixture()).api # request the API root, which provides us the versions of the API resp = api.api_request('/', strip_version=True) self.assertEqual(200, resp.status_code, resp.content) # request a bad root url, should be a 404 # # NOTE(sdague): this currently fails, as it falls into the 300 # dispatcher instead. This is a bug. The test case is left in # here, commented out until we can address it. 
# # resp = api.api_request('/foo', strip_version=True) # self.assertEqual(resp.status_code, 400, resp.content) # request a known bad url, and we should get a 404 resp = api.api_request('/foo') self.assertEqual(404, resp.status_code, resp.content) class TestDatabaseFixture(testtools.TestCase): def test_fixture_reset(self): # because this sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.Database()) engine = session.get_engine() conn = engine.connect() result = conn.execute("select * from instance_types") rows = result.fetchall() self.assertEqual(5, len(rows), "Rows %s" % rows) # insert a 6th instance type, column 5 below is an int id # which has a constraint on it, so if new standard instance # types are added you have to bump it. conn.execute("insert into instance_types VALUES " "(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'" ", 1.0, 40, 0, 0, 1, 0)") result = conn.execute("select * from instance_types") rows = result.fetchall() self.assertEqual(6, len(rows), "Rows %s" % rows) # reset by invoking the fixture again # # NOTE(sdague): it's important to reestablish the db # connection because otherwise we have a reference to the old # in mem db. self.useFixture(fixtures.Database()) conn = engine.connect() result = conn.execute("select * from instance_types") rows = result.fetchall() self.assertEqual(5, len(rows), "Rows %s" % rows) def test_api_fixture_reset(self): # This sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.Database(database='api')) engine = session.get_api_engine() conn = engine.connect() result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(0, len(rows), "Rows %s" % rows) uuid = uuidutils.generate_uuid() conn.execute("insert into cell_mappings (uuid, name) VALUES " "('%s', 'fake-cell')" % (uuid,)) result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(1, len(rows), "Rows %s" % rows) # reset by invoking the fixture again # # NOTE(sdague): it's important to reestablish the db # connection because otherwise we have a reference to the old # in mem db. 
self.useFixture(fixtures.Database(database='api')) conn = engine.connect() result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(0, len(rows), "Rows %s" % rows) def test_fixture_cleanup(self): # because this sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) fix = fixtures.Database() self.useFixture(fix) # manually do the cleanup that addCleanup will do fix.cleanup() # ensure the db contains nothing engine = session.get_engine() conn = engine.connect() schema = "".join(line for line in conn.connection.iterdump()) self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;") def test_api_fixture_cleanup(self): # This sets up reasonable db connection strings self.useFixture(conf_fixture.ConfFixture()) fix = fixtures.Database(database='api') self.useFixture(fix) # No data inserted by migrations so we need to add a row engine = session.get_api_engine() conn = engine.connect() uuid = uuidutils.generate_uuid() conn.execute("insert into cell_mappings (uuid, name) VALUES " "('%s', 'fake-cell')" % (uuid,)) result = conn.execute("select * from cell_mappings") rows = result.fetchall() self.assertEqual(1, len(rows), "Rows %s" % rows) # Manually do the cleanup that addCleanup will do fix.cleanup() # Ensure the db contains nothing engine = session.get_api_engine() conn = engine.connect() schema = "".join(line for line in conn.connection.iterdump()) self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema) class TestDatabaseAtVersionFixture(testtools.TestCase): def test_fixture_schema_version(self): self.useFixture(conf_fixture.ConfFixture()) # In/after 317 aggregates did have uuid self.useFixture(fixtures.DatabaseAtVersion(318)) engine = session.get_engine() engine.connect() meta = sqlalchemy.MetaData(engine) aggregate = sqlalchemy.Table('aggregates', meta, autoload=True) self.assertTrue(hasattr(aggregate.c, 'uuid')) # Before 317, aggregates had no uuid self.useFixture(fixtures.DatabaseAtVersion(316)) engine = session.get_engine() engine.connect() meta = sqlalchemy.MetaData(engine) aggregate = sqlalchemy.Table('aggregates', meta, autoload=True) self.assertFalse(hasattr(aggregate.c, 'uuid')) engine.dispose() def test_fixture_after_database_fixture(self): self.useFixture(conf_fixture.ConfFixture()) self.useFixture(fixtures.Database()) self.useFixture(fixtures.DatabaseAtVersion(318)) class TestIndirectionAPIFixture(testtools.TestCase): def test_indirection_api(self): # Should initially be None self.assertIsNone(obj_base.NovaObject.indirection_api) # make sure the fixture correctly sets the value fix = fixtures.IndirectionAPIFixture('foo') self.useFixture(fix) self.assertEqual('foo', obj_base.NovaObject.indirection_api) # manually do the cleanup that addCleanup will do fix.cleanup() # ensure the initial value is restored self.assertIsNone(obj_base.NovaObject.indirection_api) class TestSpawnIsSynchronousFixture(testtools.TestCase): def test_spawn_patch(self): orig_spawn = utils.spawn_n fix = fixtures.SpawnIsSynchronousFixture() self.useFixture(fix) self.assertNotEqual(orig_spawn, utils.spawn_n) def test_spawn_passes_through(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) tester = mock.MagicMock() utils.spawn_n(tester.function, 'foo', bar='bar') tester.function.assert_called_once_with('foo', bar='bar') def test_spawn_return_has_wait(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn(lambda x: '%s' % x, 'foo') foo = gt.wait() self.assertEqual('foo', foo) def test_spawn_n_return_has_wait(self): 
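# Same contract as spawn() above: with the fixture applied, spawn_n()
# also returns an object exposing wait(), so callers written against
# the greenthread interface keep working in synchronous test mode.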
self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn_n(lambda x: '%s' % x, 'foo') foo = gt.wait() self.assertEqual('foo', foo) def test_spawn_has_link(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn(mock.MagicMock) passed_arg = 'test' call_count = [] def fake(thread, param): self.assertEqual(gt, thread) self.assertEqual(passed_arg, param) call_count.append(1) gt.link(fake, passed_arg) self.assertEqual(1, len(call_count)) def test_spawn_n_has_link(self): self.useFixture(fixtures.SpawnIsSynchronousFixture()) gt = utils.spawn_n(mock.MagicMock) passed_arg = 'test' call_count = [] def fake(thread, param): self.assertEqual(gt, thread) self.assertEqual(passed_arg, param) call_count.append(1) gt.link(fake, passed_arg) self.assertEqual(1, len(call_count)) class TestBannedDBSchemaOperations(testtools.TestCase): def test_column(self): column = sqlalchemy.Column() with fixtures.BannedDBSchemaOperations(['Column']): self.assertRaises(exception.DBNotAllowed, column.drop) self.assertRaises(exception.DBNotAllowed, column.alter) def test_table(self): table = sqlalchemy.Table() with fixtures.BannedDBSchemaOperations(['Table']): self.assertRaises(exception.DBNotAllowed, table.drop) self.assertRaises(exception.DBNotAllowed, table.alter) class TestStableObjectJsonFixture(testtools.TestCase): def test_changes_sort(self): class TestObject(obj_base.NovaObject): def obj_what_changed(self): return ['z', 'a'] obj = TestObject() self.assertEqual(['z', 'a'], obj.obj_to_primitive()['nova_object.changes']) with fixtures.StableObjectJsonFixture(): self.assertEqual(['a', 'z'], obj.obj_to_primitive()['nova_object.changes']) nova-13.1.4/nova/tests/unit/test_rpc.py0000664000567000056710000003142713064447152021202 0ustar jenkinsjenkins00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import fixtures import mock import oslo_messaging as messaging from oslo_serialization import jsonutils import testtools from nova import context from nova import rpc from nova import test # Make a class that resets all of the global variables in nova.rpc class RPCResetFixture(fixtures.Fixture): def _setUp(self): self.trans = copy.copy(rpc.TRANSPORT) self.noti_trans = copy.copy(rpc.NOTIFICATION_TRANSPORT) self.noti = copy.copy(rpc.NOTIFIER) self.all_mods = copy.copy(rpc.ALLOWED_EXMODS) self.ext_mods = copy.copy(rpc.EXTRA_EXMODS) self.addCleanup(self._reset_everything) def _reset_everything(self): rpc.TRANSPORT = self.trans rpc.NOTIFICATION_TRANSPORT = self.noti_trans rpc.NOTIFIER = self.noti rpc.ALLOWED_EXMODS = self.all_mods rpc.EXTRA_EXMODS = self.ext_mods # We can't import nova.test.TestCase because that sets up an RPCFixture # that pretty much nullifies all of this testing class TestRPC(testtools.TestCase): def setUp(self): super(TestRPC, self).setUp() self.useFixture(RPCResetFixture()) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_transport') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_unversioned(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods): # The expected call to get the legacy notifier will require no new # kwargs, and we expect the new notifier will need the noop driver expected = [{}, {'driver': 'noop'}] self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, 'unversioned', expected) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_transport') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_both(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods): expected = [{}, {'topic': 'versioned_notifications'}] self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, 'both', expected) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_transport') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_versioned(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods): expected = [{'driver': 'noop'}, {'topic': 'versioned_notifications'}] self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, 'versioned', expected) def test_cleanup_transport_null(self): rpc.NOTIFICATION_TRANSPORT = mock.Mock() rpc.LEGACY_NOTIFIER = mock.Mock() rpc.NOTIFIER = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup_notification_transport_null(self): rpc.TRANSPORT = mock.Mock() rpc.NOTIFIER = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup_legacy_notifier_null(self): rpc.TRANSPORT = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() rpc.NOTIFIER = mock.Mock() def test_cleanup_notifier_null(self): rpc.TRANSPORT = mock.Mock() rpc.LEGACY_NOTIFIER = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() self.assertRaises(AssertionError, rpc.cleanup) def test_cleanup(self): rpc.LEGACY_NOTIFIER = mock.Mock() rpc.NOTIFIER = mock.Mock() rpc.NOTIFICATION_TRANSPORT = mock.Mock() rpc.TRANSPORT = mock.Mock() trans_cleanup = mock.Mock() not_trans_cleanup = mock.Mock() rpc.TRANSPORT.cleanup = 
trans_cleanup rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup rpc.cleanup() trans_cleanup.assert_called_once_with() not_trans_cleanup.assert_called_once_with() self.assertIsNone(rpc.TRANSPORT) self.assertIsNone(rpc.NOTIFICATION_TRANSPORT) self.assertIsNone(rpc.LEGACY_NOTIFIER) self.assertIsNone(rpc.NOTIFIER) @mock.patch.object(messaging, 'set_transport_defaults') def test_set_defaults(self, mock_set): control_exchange = mock.Mock() rpc.set_defaults(control_exchange) mock_set.assert_called_once_with(control_exchange) def test_add_extra_exmods(self): rpc.EXTRA_EXMODS = [] rpc.add_extra_exmods('foo', 'bar') self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS) def test_clear_extra_exmods(self): rpc.EXTRA_EXMODS = ['foo', 'bar'] rpc.clear_extra_exmods() self.assertEqual(0, len(rpc.EXTRA_EXMODS)) def test_get_allowed_exmods(self): rpc.ALLOWED_EXMODS = ['foo'] rpc.EXTRA_EXMODS = ['bar'] exmods = rpc.get_allowed_exmods() self.assertEqual(['foo', 'bar'], exmods) @mock.patch.object(messaging, 'TransportURL') def test_get_transport_url(self, mock_url): conf = mock.Mock() rpc.CONF = conf mock_url.parse.return_value = 'foo' url = rpc.get_transport_url(url_str='bar') self.assertEqual('foo', url) mock_url.parse.assert_called_once_with(conf, 'bar', rpc.TRANSPORT_ALIASES) @mock.patch.object(messaging, 'TransportURL') def test_get_transport_url_null(self, mock_url): conf = mock.Mock() rpc.CONF = conf mock_url.parse.return_value = 'foo' url = rpc.get_transport_url() self.assertEqual('foo', url) mock_url.parse.assert_called_once_with(conf, None, rpc.TRANSPORT_ALIASES) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'RPCClient') def test_get_client(self, mock_client, mock_ser): rpc.TRANSPORT = mock.Mock() tgt = mock.Mock() ser = mock.Mock() mock_client.return_value = 'client' mock_ser.return_value = ser client = rpc.get_client(tgt, version_cap='1.0', serializer='foo') mock_ser.assert_called_once_with('foo') mock_client.assert_called_once_with(rpc.TRANSPORT, tgt, version_cap='1.0', serializer=ser) self.assertEqual('client', client) @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_rpc_server') def test_get_server(self, mock_get, mock_ser): rpc.TRANSPORT = mock.Mock() ser = mock.Mock() tgt = mock.Mock() ends = mock.Mock() mock_ser.return_value = ser mock_get.return_value = 'server' server = rpc.get_server(tgt, ends, serializer='foo') mock_ser.assert_called_once_with('foo') mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, executor='eventlet', serializer=ser) self.assertEqual('server', server) def test_get_notifier(self): rpc.LEGACY_NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.LEGACY_NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', publisher_id='foo') mock_prep.assert_called_once_with(publisher_id='foo') self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier) self.assertEqual('notifier', notifier.notifier) def test_get_notifier_null_publisher(self): rpc.LEGACY_NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.LEGACY_NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', host='bar') mock_prep.assert_called_once_with(publisher_id='service.bar') self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier) self.assertEqual('notifier', notifier.notifier) def test_get_versioned_notifier(self): rpc.NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.NOTIFIER.prepare = mock_prep notifier 
= rpc.get_versioned_notifier('service.foo') mock_prep.assert_called_once_with(publisher_id='service.foo') self.assertEqual('notifier', notifier) def _test_init(self, mock_notif, mock_noti_trans, mock_trans, mock_ser, mock_exmods, notif_format, expected_driver_topic_kwargs): legacy_notifier = mock.Mock() notifier = mock.Mock() notif_transport = mock.Mock() transport = mock.Mock() serializer = mock.Mock() conf = mock.Mock() conf.notification_format = notif_format mock_exmods.return_value = ['foo'] mock_trans.return_value = transport mock_noti_trans.return_value = notif_transport mock_ser.return_value = serializer mock_notif.side_effect = [legacy_notifier, notifier] rpc.init(conf) mock_exmods.assert_called_once_with() mock_trans.assert_called_once_with(conf, allowed_remote_exmods=['foo'], aliases=rpc.TRANSPORT_ALIASES) self.assertIsNotNone(rpc.TRANSPORT) self.assertIsNotNone(rpc.LEGACY_NOTIFIER) self.assertIsNotNone(rpc.NOTIFIER) self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER) self.assertEqual(notifier, rpc.NOTIFIER) expected_calls = [] for kwargs in expected_driver_topic_kwargs: expected_kwargs = {'serializer': serializer} expected_kwargs.update(kwargs) expected_calls.append(((notif_transport,), expected_kwargs)) self.assertEqual(expected_calls, mock_notif.call_args_list, "The calls to messaging.Notifier() did not create " "the legacy and versioned notifiers properly.") class TestJsonPayloadSerializer(test.NoDBTestCase): def test_serialize_entity(self): with mock.patch.object(jsonutils, 'to_primitive') as mock_prim: rpc.JsonPayloadSerializer.serialize_entity('context', 'entity') mock_prim.assert_called_once_with('entity', convert_instances=True) class TestRequestContextSerializer(test.NoDBTestCase): def setUp(self): super(TestRequestContextSerializer, self).setUp() self.mock_base = mock.Mock() self.ser = rpc.RequestContextSerializer(self.mock_base) self.ser_null = rpc.RequestContextSerializer(None) def test_serialize_entity(self): self.mock_base.serialize_entity.return_value = 'foo' ser_ent = self.ser.serialize_entity('context', 'entity') self.mock_base.serialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', ser_ent) def test_serialize_entity_null_base(self): ser_ent = self.ser_null.serialize_entity('context', 'entity') self.assertEqual('entity', ser_ent) def test_deserialize_entity(self): self.mock_base.deserialize_entity.return_value = 'foo' deser_ent = self.ser.deserialize_entity('context', 'entity') self.mock_base.deserialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', deser_ent) def test_deserialize_entity_null_base(self): deser_ent = self.ser_null.deserialize_entity('context', 'entity') self.assertEqual('entity', deser_ent) def test_serialize_context(self): context = mock.Mock() self.ser.serialize_context(context) context.to_dict.assert_called_once_with() @mock.patch.object(context, 'RequestContext') def test_deserialize_context(self, mock_req): self.ser.deserialize_context('context') mock_req.from_dict.assert_called_once_with('context') nova-13.1.4/nova/tests/unit/network/0000775000567000056710000000000013064447471020473 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/network/security_group/0000775000567000056710000000000013064447471023556 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/network/security_group/__init__.py0000664000567000056710000000000013064447140025646 0ustar 
jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/network/security_group/test_neutron_driver.py0000664000567000056710000004635413064447152030244 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from mox3 import mox from neutronclient.common import exceptions as n_exc from neutronclient.v2_0 import client from six.moves import range from nova import context from nova import exception from nova.network.neutronv2 import api as neutronapi from nova.network.security_group import neutron_driver from nova.network.security_group import openstack_driver from nova import objects from nova import test from nova.tests import uuidsentinel as uuids class TestNeutronDriver(test.NoDBTestCase): def setUp(self): super(TestNeutronDriver, self).setUp() self.mox.StubOutWithMock(neutronapi, 'get_client') self.moxed_client = self.mox.CreateMock(client.Client) neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) self.context = context.RequestContext('userid', 'my_tenantid') setattr(self.context, 'auth_token', 'bff4a5a6b9eb4ea2a6efec6eefb77936') def test_list_with_project(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' security_groups_list = {'security_groups': []} self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(self.context, project=project_id) def test_list_with_all_tenants_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' search_opts = {'all_tenants': 1} security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id, search_opts=search_opts) mock_list_secgroup.assert_called_once_with() def test_list_without_all_tenants_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id) mock_list_secgroup.assert_called_once_with(tenant_id=project_id) def test_list_with_all_tenants_sec_name_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' search_opts = {'all_tenants': 1} security_group_names = ['secgroup_ssh'] security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = 
neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id, names=security_group_names, search_opts=search_opts) mock_list_secgroup.assert_called_once_with( name=security_group_names, tenant_id=project_id) def test_list_with_all_tenants_sec_name_ids_and_admin_context(self): project_id = '0af70a4d22cf4652824ddc1f2435dd85' search_opts = {'all_tenants': 1} security_group_names = ['secgroup_ssh'] security_group_ids = ['id1'] security_groups_list = {'security_groups': []} admin_context = context.RequestContext('user1', project_id, True) self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(admin_context, project=project_id, names=security_group_names, ids=security_group_ids, search_opts=search_opts) mock_list_secgroup.assert_called_once_with( name=security_group_names, id=security_group_ids, tenant_id=project_id) def test_list_with_all_tenants_not_admin(self): search_opts = {'all_tenants': 1} security_groups_list = {'security_groups': []} self.mox.ReplayAll() with mock.patch.object( self.moxed_client, 'list_security_groups', return_value=security_groups_list) as mock_list_secgroup: sg_api = neutron_driver.SecurityGroupAPI() sg_api.list(self.context, project=self.context.tenant, search_opts=search_opts) mock_list_secgroup.assert_called_once_with( tenant_id=self.context.tenant) def test_get_with_name_duplicated(self): sg_name = 'web_server' expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5' list_security_groups = {'security_groups': [{'name': sg_name, 'id': expected_sg_id, 'tenant_id': self.context.tenant, 'description': 'server', 'rules': []} ]} self.moxed_client.list_security_groups(name=sg_name, fields='id', tenant_id=self.context.tenant).AndReturn(list_security_groups) expected_sg = {'security_group': {'name': sg_name, 'id': expected_sg_id, 'tenant_id': self.context.tenant, 'description': 'server', 'rules': []}} self.moxed_client.show_security_group(expected_sg_id).AndReturn( expected_sg) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() observed_sg = sg_api.get(self.context, name=sg_name) expected_sg['security_group']['project_id'] = self.context.tenant del expected_sg['security_group']['tenant_id'] self.assertEqual(expected_sg['security_group'], observed_sg) def test_get_with_invalid_name(self): sg_name = 'invalid_name' expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5' list_security_groups = {'security_groups': [{'name': sg_name, 'id': expected_sg_id, 'tenant_id': self.context.tenant, 'description': 'server', 'rules': []} ]} self.moxed_client.list_security_groups(name=sg_name, fields='id', tenant_id=self.context.tenant).AndReturn(list_security_groups) self.moxed_client.show_security_group(expected_sg_id).AndRaise( TypeError) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.SecurityGroupNotFound, sg_api.get, self.context, name=sg_name) def test_create_security_group_with_bad_request(self): name = 'test-security-group' description = None body = {'security_group': {'name': name, 'description': description}} message = "Invalid input. Reason: 'None' is not a valid string." 
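        # NOTE: the mox expectation below makes create_security_group() raise
        # neutronclient's BadRequest; the assertion that follows checks that the
        # driver translates it into nova's exception.Invalid instead of letting
        # the client exception leak through.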
self.moxed_client.create_security_group( body).AndRaise(n_exc.BadRequest(message=message)) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.Invalid, sg_api.create_security_group, self.context, name, description) def test_create_security_group_exceed_quota(self): name = 'test-security-group' description = 'test-security-group' body = {'security_group': {'name': name, 'description': description}} message = "Quota exceeded for resources: ['security_group']" self.moxed_client.create_security_group( body).AndRaise(n_exc.NeutronClientException(status_code=409, message=message)) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.SecurityGroupLimitExceeded, sg_api.create_security_group, self.context, name, description) def test_create_security_group_rules_exceed_quota(self): vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0', 'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'group_id': None, 'from_port': 1025, 'to_port': 1025} body = {'security_group_rules': [{'remote_group_id': None, 'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 1025, 'port_range_min': 1025, 'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'remote_ip_prefix': '0.0.0.0/0'}]} name = 'test-security-group' message = "Quota exceeded for resources: ['security_group_rule']" self.moxed_client.create_security_group_rule( body).AndRaise(n_exc.NeutronClientException(status_code=409, message=message)) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.SecurityGroupLimitExceeded, sg_api.add_rules, self.context, None, name, [vals]) def test_create_security_group_rules_bad_request(self): vals = {'protocol': 'icmp', 'cidr': '0.0.0.0/0', 'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'group_id': None, 'to_port': 255} body = {'security_group_rules': [{'remote_group_id': None, 'direction': 'ingress', 'protocol': 'icmp', 'ethertype': 'IPv4', 'port_range_max': 255, 'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47', 'remote_ip_prefix': '0.0.0.0/0'}]} name = 'test-security-group' message = "ICMP code (port-range-max) 255 is provided but ICMP type" \ " (port-range-min) is missing" self.moxed_client.create_security_group_rule( body).AndRaise(n_exc.NeutronClientException(status_code=400, message=message)) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() self.assertRaises(exception.Invalid, sg_api.add_rules, self.context, None, name, [vals]) def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self): sg1 = {'description': 'default', 'id': '07f1362f-34f6-4136-819a-2dcde112269e', 'name': 'default', 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6', 'security_group_rules': [{'direction': 'ingress', 'ethertype': 'IPv4', 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb', 'port_range_max': None, 'port_range_min': None, 'protocol': '51', 'remote_group_id': None, 'remote_ip_prefix': None, 'security_group_id': '07f1362f-34f6-4136-819a-2dcde112269e', 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]} self.moxed_client.list_security_groups().AndReturn( {'security_groups': [sg1]}) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.list(self.context) expected = [{'rules': [{'from_port': -1, 'protocol': '51', 'to_port': -1, 'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e', 'cidr': '0.0.0.0/0', 'group_id': None, 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}], 'project_id': 'c166d9316f814891bcb66b96c4c891d6', 
'id': '07f1362f-34f6-4136-819a-2dcde112269e', 'name': 'default', 'description': 'default'}] self.assertEqual(expected, result) def test_instances_security_group_bindings(self): server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1' port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0' port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44' sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4' sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584' servers = [{'id': server_id}] ports = [{'id': port1_id, 'device_id': server_id, 'security_groups': [sg1_id]}, {'id': port2_id, 'device_id': server_id, 'security_groups': [sg2_id]}] port_list = {'ports': ports} sg1 = {'id': sg1_id, 'name': 'wol'} sg2 = {'id': sg2_id, 'name': 'eor'} security_groups_list = {'security_groups': [sg1, sg2]} sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]} self.moxed_client.list_ports(device_id=[server_id]).AndReturn( port_list) self.moxed_client.list_security_groups( id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instances_security_groups_bindings( self.context, servers) self.assertEqual(sg_bindings, result) def _test_instances_security_group_bindings_scale(self, num_servers): max_query = 150 sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4' sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584' sg1 = {'id': sg1_id, 'name': 'wol'} sg2 = {'id': sg2_id, 'name': 'eor'} security_groups_list = {'security_groups': [sg1, sg2]} servers = [] device_ids = [] ports = [] sg_bindings = {} for i in range(0, num_servers): server_id = "server-%d" % i port_id = "port-%d" % i servers.append({'id': server_id}) device_ids.append(server_id) ports.append({'id': port_id, 'device_id': server_id, 'security_groups': [sg1_id, sg2_id]}) sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}] for x in range(0, num_servers, max_query): self.moxed_client.list_ports( device_id=device_ids[x:x + max_query]).\ AndReturn({'ports': ports[x:x + max_query]}) self.moxed_client.list_security_groups( id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instances_security_groups_bindings( self.context, servers) self.assertEqual(sg_bindings, result) def test_instances_security_group_bindings_less_than_max(self): self._test_instances_security_group_bindings_scale(100) def test_instances_security_group_bindings_max(self): self._test_instances_security_group_bindings_scale(150) def test_instances_security_group_bindings_more_then_max(self): self._test_instances_security_group_bindings_scale(300) def test_instances_security_group_bindings_with_hidden_sg(self): servers = [{'id': 'server_1'}] ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']}, {'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}] port_list = {'ports': ports} sg1 = {'id': '1', 'name': 'wol'} # User doesn't have access to sg2 security_groups_list = {'security_groups': [sg1]} sg_bindings = {'dev_1': [{'name': 'wol'}]} self.moxed_client.list_ports(device_id=['server_1']).AndReturn( port_list) self.moxed_client.\ list_security_groups(id=mox.SameElementsAs(['1', '2'])).AndReturn( security_groups_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instances_security_groups_bindings( self.context, servers) self.assertEqual(sg_bindings, result) def test_instance_empty_security_groups(self): port_list = {'ports': [{'id': 1, 'device_id': uuids.instance, 
'security_groups': []}]} self.moxed_client.list_ports( device_id=[uuids.instance]).AndReturn(port_list) self.mox.ReplayAll() sg_api = neutron_driver.SecurityGroupAPI() result = sg_api.get_instance_security_groups( self.context, objects.Instance(uuid=uuids.instance)) self.assertEqual([], result) class TestNeutronDriverWithoutMock(test.NoDBTestCase): def test_validate_property(self): sg_api = neutron_driver.SecurityGroupAPI() sg_api.validate_property('foo', 'name', None) sg_api.validate_property('', 'name', None) self.assertRaises(exception.Invalid, sg_api.validate_property, 'a' * 256, 'name', None) self.assertRaises(exception.Invalid, sg_api.validate_property, None, 'name', None) def test_populate_security_groups(self): sg_api = neutron_driver.SecurityGroupAPI() r = sg_api.populate_security_groups('ignore') self.assertIsInstance(r, objects.SecurityGroupList) self.assertEqual(0, len(r)) class TestGetter(test.NoDBTestCase): @mock.patch('nova.network.security_group.openstack_driver.' '_get_openstack_security_group_driver') def test_caches(self, mock_get): getter = openstack_driver.get_openstack_security_group_driver openstack_driver.DRIVER_CACHE = {} getter(False) getter(False) getter(True) getter(False) self.assertEqual(2, len(mock_get.call_args_list)) self.assertEqual({True: mock_get.return_value, False: mock_get.return_value}, openstack_driver.DRIVER_CACHE) nova-13.1.4/nova/tests/unit/network/test_neutronv2.py0000664000567000056710000063415213064447152024055 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import collections import copy import uuid from keystoneauth1.fixture import V2Token from keystoneauth1 import loading as ks_loading import mock from mox3 import mox from neutronclient.common import exceptions from neutronclient.v2_0 import client from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import timeutils import requests_mock import six from six.moves import range from nova.compute import flavors from nova import context from nova import exception from nova.network import model from nova.network.neutronv2 import api as neutronapi from nova.network.neutronv2 import constants from nova import objects from nova.pci import manager as pci_manager from nova.pci import whitelist as pci_whitelist from nova import policy from nova import test from nova.tests.unit import fake_instance from nova.tests import uuidsentinel as uuids CONF = cfg.CONF # NOTE: Neutron client raises Exception which is discouraged by HACKING. # We set this variable here and use it for assertions below to avoid # the hacking checks until we can make neutron client throw a custom # exception class instead. 
NEUTRON_CLIENT_EXCEPTION = Exception fake_info_cache = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'instance_uuid': 'fake-uuid', 'network_info': '[]', } class MyComparator(mox.Comparator): def __init__(self, lhs): self.lhs = lhs def _com_dict(self, lhs, rhs): if len(lhs) != len(rhs): return False for key, value in six.iteritems(lhs): if key not in rhs: return False rhs_value = rhs[key] if not self._com(value, rhs_value): return False return True def _com_list(self, lhs, rhs): if len(lhs) != len(rhs): return False for lhs_value in lhs: if lhs_value not in rhs: return False return True def _com(self, lhs, rhs): if lhs is None: return rhs is None if isinstance(lhs, dict): if not isinstance(rhs, dict): return False return self._com_dict(lhs, rhs) if isinstance(lhs, list): if not isinstance(rhs, list): return False return self._com_list(lhs, rhs) if isinstance(lhs, tuple): if not isinstance(rhs, tuple): return False return self._com_list(lhs, rhs) return lhs == rhs def equals(self, rhs): return self._com(self.lhs, rhs) def __repr__(self): return str(self.lhs) class TestNeutronClient(test.NoDBTestCase): def setUp(self): super(TestNeutronClient, self).setUp() neutronapi.reset_state() def test_withtoken(self): self.flags(url='http://anyhost/', group='neutron') self.flags(timeout=30, group='neutron') my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token') cl = neutronapi.get_client(my_context) self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override) self.assertEqual(my_context.auth_token, cl.httpclient.auth.auth_token) self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout) def test_withouttoken(self): my_context = context.RequestContext('userid', 'my_tenantid') self.assertRaises(exceptions.Unauthorized, neutronapi.get_client, my_context) def test_withtoken_context_is_admin(self): self.flags(url='http://anyhost/', group='neutron') self.flags(timeout=30, group='neutron') my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token', is_admin=True) cl = neutronapi.get_client(my_context) self.assertEqual(CONF.neutron.url, cl.httpclient.endpoint_override) self.assertEqual(my_context.auth_token, cl.httpclient.auth.auth_token) self.assertEqual(CONF.neutron.timeout, cl.httpclient.session.timeout) def test_withouttoken_keystone_connection_error(self): self.flags(url='http://anyhost/', group='neutron') my_context = context.RequestContext('userid', 'my_tenantid') self.assertRaises(NEUTRON_CLIENT_EXCEPTION, neutronapi.get_client, my_context) @mock.patch('nova.network.neutronv2.api._ADMIN_AUTH') @mock.patch.object(client.Client, "list_networks", new=mock.Mock()) def test_reuse_admin_token(self, m): self.flags(url='http://anyhost/', group='neutron') my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token') tokens = ['new_token2', 'new_token1'] def token_vals(*args, **kwargs): return tokens.pop() m.get_token.side_effect = token_vals client1 = neutronapi.get_client(my_context, True) client1.list_networks(retrieve_all=False) self.assertEqual('new_token1', client1.httpclient.auth.get_token(None)) client1 = neutronapi.get_client(my_context, True) client1.list_networks(retrieve_all=False) self.assertEqual('new_token2', client1.httpclient.auth.get_token(None)) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_load_auth_plugin_failed(self, mock_load_from_conf): mock_load_from_conf.return_value = None from neutronclient.common import exceptions as neutron_client_exc 
self.assertRaises(neutron_client_exc.Unauthorized, neutronapi._load_auth_plugin, CONF) class TestNeutronv2Base(test.TestCase): def setUp(self): super(TestNeutronv2Base, self).setUp() self.context = context.RequestContext('userid', 'my_tenantid') setattr(self.context, 'auth_token', 'bff4a5a6b9eb4ea2a6efec6eefb77936') self.tenant_id = '9d049e4b60b64716978ab415e6fbd5c0' self.instance = {'project_id': self.tenant_id, 'uuid': str(uuid.uuid4()), 'display_name': 'test_instance', 'hostname': 'test-instance', 'availability_zone': 'nova', 'host': 'some_host', 'info_cache': {'network_info': []}, 'security_groups': []} self.instance2 = {'project_id': self.tenant_id, 'uuid': str(uuid.uuid4()), 'display_name': 'test_instance2', 'availability_zone': 'nova', 'info_cache': {'network_info': []}, 'security_groups': []} self.nets1 = [{'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': 'my_tenantid'}] self.nets2 = [] self.nets2.append(self.nets1[0]) self.nets2.append({'id': 'my_netid2', 'name': 'my_netname2', 'subnets': ['mysubnid2'], 'tenant_id': 'my_tenantid'}) self.nets3 = self.nets2 + [{'id': 'my_netid3', 'name': 'my_netname3', 'subnets': ['mysubnid3'], 'tenant_id': 'my_tenantid'}] self.nets4 = [{'id': 'his_netid4', 'name': 'his_netname4', 'tenant_id': 'his_tenantid'}] # A network request with external networks self.nets5 = self.nets1 + [{'id': 'the-external-one', 'name': 'out-of-this-world', 'subnets': ['mysubnid5'], 'router:external': True, 'tenant_id': 'should-be-an-admin'}] # A network request with a duplicate self.nets6 = [] self.nets6.append(self.nets1[0]) self.nets6.append(self.nets1[0]) # A network request with a combo self.nets7 = [] self.nets7.append(self.nets2[1]) self.nets7.append(self.nets1[0]) self.nets7.append(self.nets2[1]) self.nets7.append(self.nets1[0]) # A network request with only external network self.nets8 = [self.nets5[1]] # An empty network self.nets9 = [] # A network that is both shared and external self.nets10 = [{'id': 'net_id', 'name': 'net_name', 'router:external': True, 'shared': True, 'subnets': ['mysubnid10']}] # A network with non-blank dns_domain to test _update_port_dns_name self.nets11 = [{'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': 'my_tenantid', 'dns_domain': 'my-domain.org.'}] self.nets = [self.nets1, self.nets2, self.nets3, self.nets4, self.nets5, self.nets6, self.nets7, self.nets8, self.nets9, self.nets10, self.nets11] self.port_address = '10.0.1.2' self.port_data1 = [{'network_id': 'my_netid1', 'device_id': self.instance2['uuid'], 'tenant_id': self.tenant_id, 'device_owner': 'compute:nova', 'id': 'my_portid1', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'status': 'DOWN', 'admin_state_up': True, 'fixed_ips': [{'ip_address': self.port_address, 'subnet_id': 'my_subid1'}], 'mac_address': 'my_mac1', }] self.float_data1 = [{'port_id': 'my_portid1', 'fixed_ip_address': self.port_address, 'floating_ip_address': '172.0.1.2'}] self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9', 'subnet_id': 'my_subid1'}], 'status': 'ACTIVE', 'admin_state_up': True}] self.port_address2 = '10.0.2.2' self.port_data2 = [] self.port_data2.append(self.port_data1[0]) self.port_data2.append({'network_id': 'my_netid2', 'device_id': self.instance['uuid'], 'tenant_id': self.tenant_id, 'admin_state_up': True, 'status': 'ACTIVE', 'device_owner': 'compute:nova', 'id': 'my_portid2', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'fixed_ips': [{'ip_address': self.port_address2, 'subnet_id': 'my_subid2'}], 'mac_address': 
'my_mac2', }) self.float_data2 = [] self.float_data2.append(self.float_data1[0]) self.float_data2.append({'port_id': 'my_portid2', 'fixed_ip_address': '10.0.2.2', 'floating_ip_address': '172.0.2.2'}) self.port_data3 = [{'network_id': 'my_netid1', 'device_id': 'device_id3', 'tenant_id': self.tenant_id, 'status': 'DOWN', 'admin_state_up': True, 'device_owner': 'compute:nova', 'id': 'my_portid3', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'fixed_ips': [], # no fixed ip 'mac_address': 'my_mac3', }] self.subnet_data1 = [{'id': 'my_subid1', 'cidr': '10.0.1.0/24', 'network_id': 'my_netid1', 'gateway_ip': '10.0.1.1', 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}] self.subnet_data2 = [] self.subnet_data_n = [{'id': 'my_subid1', 'cidr': '10.0.1.0/24', 'network_id': 'my_netid1', 'gateway_ip': '10.0.1.1', 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}, {'id': 'my_subid2', 'cidr': '20.0.1.0/24', 'network_id': 'my_netid2', 'gateway_ip': '20.0.1.1', 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}] self.subnet_data2.append({'id': 'my_subid2', 'cidr': '10.0.2.0/24', 'network_id': 'my_netid2', 'gateway_ip': '10.0.2.1', 'dns_nameservers': ['8.8.2.1', '8.8.2.2']}) self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3', 'name': 'ext_net', 'router:external': True, 'tenant_id': 'admin_tenantid'} self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db', 'name': 'nova', 'router:external': True, 'tenant_id': 'admin_tenantid'} self.fip_unassociated = {'tenant_id': 'my_tenantid', 'id': 'fip_id1', 'floating_ip_address': '172.24.4.227', 'floating_network_id': self.fip_pool['id'], 'port_id': None, 'fixed_ip_address': None, 'router_id': None} fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address'] self.fip_associated = {'tenant_id': 'my_tenantid', 'id': 'fip_id2', 'floating_ip_address': '172.24.4.228', 'floating_network_id': self.fip_pool['id'], 'port_id': self.port_data2[1]['id'], 'fixed_ip_address': fixed_ip_address, 'router_id': 'router_id1'} self._returned_nw_info = [] self.mox.StubOutWithMock(neutronapi, 'get_client') self.moxed_client = self.mox.CreateMock(client.Client) self.addCleanup(CONF.reset) self.addCleanup(self.mox.VerifyAll) self.addCleanup(self.mox.UnsetStubs) self.addCleanup(self.stubs.UnsetAll) def _fake_instance_object(self, instance): return fake_instance.fake_instance_obj(self.context, **instance) def _fake_instance_info_cache(self, nw_info, instance_uuid=None): info_cache = {} if instance_uuid is None: info_cache['instance_uuid'] = str(uuid.uuid4()) else: info_cache['instance_uuid'] = instance_uuid info_cache['deleted'] = False info_cache['created_at'] = timeutils.utcnow() info_cache['deleted_at'] = timeutils.utcnow() info_cache['updated_at'] = timeutils.utcnow() info_cache['network_info'] = model.NetworkInfo.hydrate(six.text_type( jsonutils.dumps(nw_info))) return info_cache def _fake_instance_object_with_info_cache(self, instance): expected_attrs = ['info_cache'] instance = objects.Instance._from_db_object(self.context, objects.Instance(), fake_instance.fake_db_instance(**instance), expected_attrs=expected_attrs) return instance def _stub_allocate_for_instance(self, net_idx=1, **kwargs): self.instance = self._fake_instance_object(self.instance) self.instance2 = self._fake_instance_object(self.instance2) api = neutronapi.API() self.mox.StubOutWithMock(api, 'get_instance_nw_info') has_portbinding = False has_extra_dhcp_opts = False dhcp_options = kwargs.get('dhcp_options') if dhcp_options is not None: has_extra_dhcp_opts = True has_dns_extension = False if 
kwargs.get('dns_extension'): has_dns_extension = True api.extensions[constants.DNS_INTEGRATION] = 1 if kwargs.get('portbinding'): has_portbinding = True api.extensions[constants.PORTBINDING_EXT] = 1 self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache') neutronapi.get_client(mox.IgnoreArg()).AndReturn( self.moxed_client) neutronapi.get_client( mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) api._refresh_neutron_extensions_cache(mox.IgnoreArg(), neutron=self.moxed_client) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(has_portbinding) elif has_dns_extension: self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache') api._refresh_neutron_extensions_cache(mox.IgnoreArg(), neutron=self.moxed_client) else: self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache') api._refresh_neutron_extensions_cache(mox.IgnoreArg(), neutron=self.moxed_client) self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') # Net idx is 1-based for compatibility with existing unit tests nets = self.nets[net_idx - 1] ports = {} fixed_ips = {} macs = kwargs.get('macs') if macs: macs = set(macs) req_net_ids = [] ordered_networks = [] if 'requested_networks' in kwargs: for request in kwargs['requested_networks']: if request.port_id: if request.port_id == 'my_portid3': self.moxed_client.show_port(request.port_id ).AndReturn( {'port': {'id': 'my_portid3', 'network_id': 'my_netid1', 'tenant_id': self.tenant_id, 'mac_address': 'my_mac1', 'device_id': kwargs.get('_device') and self.instance2.uuid or ''}}) ports['my_netid1'] = [self.port_data1[0], self.port_data3[0]] ports[request.port_id] = self.port_data3[0] request.network_id = 'my_netid1' if macs is not None: macs.discard('my_mac1') elif request.port_id == 'invalid_id': PortNotFound = exceptions.PortNotFoundClient( status_code=404) self.moxed_client.show_port(request.port_id ).AndRaise(PortNotFound) else: self.moxed_client.show_port(request.port_id).AndReturn( {'port': {'id': 'my_portid1', 'network_id': 'my_netid1', 'tenant_id': self.tenant_id, 'mac_address': 'my_mac1', 'device_id': kwargs.get('_device') and self.instance2.uuid or '', 'dns_name': kwargs.get('_dns_name') or ''}}) ports[request.port_id] = self.port_data1[0] request.network_id = 'my_netid1' if macs is not None: macs.discard('my_mac1') else: fixed_ips[request.network_id] = request.address req_net_ids.append(request.network_id) ordered_networks.append(request) else: for n in nets: ordered_networks.append( objects.NetworkRequest(network_id=n['id'])) if kwargs.get('_break') == 'pre_list_networks': self.mox.ReplayAll() return api # search all req_net_ids as in api.py search_ids = req_net_ids if search_ids: mox_list_params = {'id': mox.SameElementsAs(search_ids)} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) else: mox_list_params = {'tenant_id': self.instance.project_id, 'shared': False} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) mox_list_params = {'shared': True} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': []}) if kwargs.get('_break') == 'post_list_networks': self.mox.ReplayAll() return api if (('requested_networks' not in kwargs or kwargs['requested_networks'].as_tuples() == [(None, None, None)]) and len(nets) > 1): self.mox.ReplayAll() return api preexisting_port_ids = [] ports_in_requested_net_order = [] nets_in_requested_net_order = [] for 
request in ordered_networks: port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } # Network lookup for available network_id network = None for net in nets: if net['id'] == request.network_id: network = net break # if net_id did not pass validate_networks() and not available # here then skip it safely not continuing with a None Network else: continue if has_portbinding: port_req_body['port']['binding:host_id'] = ( self.instance.get('host')) if has_dns_extension and not network.get('dns_domain'): port_req_body['port']['dns_name'] = self.instance.hostname if not has_portbinding and not has_dns_extension: api._populate_neutron_extension_values(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), mox.IgnoreArg(), network=network, neutron=self.moxed_client, bind_host_id=None).AndReturn(None) elif has_portbinding: # since _populate_neutron_extension_values() will call # _has_port_binding_extension() api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client).\ AndReturn(has_portbinding) else: api._refresh_neutron_extensions_cache(mox.IgnoreArg(), neutron=self.moxed_client) if request.port_id: port = ports[request.port_id] self.moxed_client.update_port(request.port_id, MyComparator(port_req_body) ).AndReturn( {'port': port}) ports_in_requested_net_order.append(request.port_id) preexisting_port_ids.append(request.port_id) else: request.address = fixed_ips.get(request.network_id) if request.address: port_req_body['port']['fixed_ips'] = [ {'ip_address': str(request.address)}] port_req_body['port']['network_id'] = request.network_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = \ self.instance.project_id if macs: port_req_body['port']['mac_address'] = macs.pop() if has_portbinding: port_req_body['port']['binding:host_id'] = ( self.instance.get('host')) res_port = {'port': {'id': 'fake'}} if has_extra_dhcp_opts: port_req_body['port']['extra_dhcp_opts'] = dhcp_options if kwargs.get('_break') == 'mac' + request.network_id: self.mox.ReplayAll() return api self.moxed_client.create_port( MyComparator(port_req_body)).AndReturn(res_port) ports_in_requested_net_order.append(res_port['port']['id']) if has_portbinding and has_dns_extension: api._has_port_binding_extension(mox.IgnoreArg()).\ AndReturn(has_portbinding) if net_idx == 11: port_req_body_dns = { 'port': { 'dns_name': self.instance.hostname } } res_port_dns = { 'port': { 'id': ports_in_requested_net_order[-1] } } self.moxed_client.update_port( ports_in_requested_net_order[-1], MyComparator(port_req_body_dns) ).AndReturn(res_port_dns) nets_in_requested_net_order.append(network) api.get_instance_nw_info(mox.IgnoreArg(), self.instance, networks=nets_in_requested_net_order, port_ids=ports_in_requested_net_order, admin_client=None, preexisting_port_ids=preexisting_port_ids, update_cells=True ).AndReturn(self._returned_nw_info) self.mox.ReplayAll() return api def _verify_nw_info(self, nw_inf, index=0): id_suffix = index + 1 self.assertEqual('10.0.%s.2' % id_suffix, nw_inf.fixed_ips()[index]['address']) self.assertEqual('172.0.%s.2' % id_suffix, nw_inf.fixed_ips()[index].floating_ip_addresses()[0]) self.assertEqual('my_netname%s' % id_suffix, nw_inf[index]['network']['label']) self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id']) self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address']) self.assertEqual('10.0.%s.0/24' % id_suffix, nw_inf[index]['network']['subnets'][0]['cidr']) ip_addr = model.IP(address='8.8.%s.1' % id_suffix, version=4, 
type='dns') self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns']) def _get_instance_nw_info(self, number): api = neutronapi.API() self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update(mox.IgnoreArg(), self.instance['uuid'], mox.IgnoreArg()).AndReturn( fake_info_cache) port_data = number == 1 and self.port_data1 or self.port_data2 net_info_cache = [] for port in port_data: net_info_cache.append({"network": {"id": port['network_id']}, "id": port['id']}) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': port_data}) net_ids = [port['network_id'] for port in port_data] nets = number == 1 and self.nets1 or self.nets2 self.moxed_client.list_networks( id=net_ids).AndReturn({'networks': nets}) for i in range(1, number + 1): float_data = number == 1 and self.float_data1 or self.float_data2 for ip in port_data[i - 1]['fixed_ips']: float_data = [x for x in float_data if x['fixed_ip_address'] == ip['ip_address']] self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=port_data[i - 1]['id']).AndReturn( {'floatingips': float_data}) subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2 self.moxed_client.list_subnets( id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn( {'subnets': subnet_data}) self.moxed_client.list_ports( network_id=subnet_data[0]['network_id'], device_owner='network:dhcp').AndReturn( {'ports': []}) self.instance['info_cache'] = self._fake_instance_info_cache( net_info_cache, self.instance['uuid']) self.mox.StubOutWithMock(api.db, 'instance_info_cache_get') api.db.instance_info_cache_get(mox.IgnoreArg(), self.instance['uuid']).AndReturn( self.instance['info_cache']) self.mox.ReplayAll() instance = self._fake_instance_object_with_info_cache(self.instance) nw_inf = api.get_instance_nw_info(self.context, instance) for i in range(0, number): self._verify_nw_info(nw_inf, i) def _allocate_for_instance(self, net_idx=1, **kwargs): api = self._stub_allocate_for_instance(net_idx, **kwargs) return api.allocate_for_instance(self.context, self.instance, **kwargs) class TestNeutronv2(TestNeutronv2Base): def setUp(self): super(TestNeutronv2, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_get_instance_nw_info_1(self): # Test to get one port in one network and subnet. neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn( self.moxed_client) self._get_instance_nw_info(1) def test_get_instance_nw_info_2(self): # Test to get one port in each of two networks and subnets. neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn( self.moxed_client) self._get_instance_nw_info(2) def test_get_instance_nw_info_with_nets_add_interface(self): # This tests that adding an interface to an instance does not # remove the first instance from the instance. network_model = model.Network(id='network_id', bridge='br-int', injected='injected', label='fake_network', tenant_id='fake_tenant') network_cache = {'info_cache': { 'network_info': [{'id': self.port_data2[0]['id'], 'address': 'mac_address', 'network': network_model, 'type': 'ovs', 'ovs_interfaceid': 'ovs_interfaceid', 'devname': 'devname'}]}} self._fake_get_instance_nw_info_helper(network_cache, self.port_data2, self.nets2, [self.port_data2[1]['id']]) def test_get_instance_nw_info_remove_ports_from_neutron(self): # This tests that when a port is removed in neutron it # is also removed from the nova. 
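        # NOTE: the cached interface seeded below uses the id 'network_id', which
        # does not match any of the ports in self.port_data2 that list_ports()
        # will return, so the refreshed info cache is expected to drop the stale
        # entry.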
network_model = model.Network(id=self.port_data2[0]['network_id'], bridge='br-int', injected='injected', label='fake_network', tenant_id='fake_tenant') network_cache = {'info_cache': { 'network_info': [{'id': 'network_id', 'address': 'mac_address', 'network': network_model, 'type': 'ovs', 'ovs_interfaceid': 'ovs_interfaceid', 'devname': 'devname'}]}} self._fake_get_instance_nw_info_helper(network_cache, self.port_data2, None, None) def test_get_instance_nw_info_ignores_neutron_ports(self): # Tests that only ports in the network_cache are updated # and ports returned from neutron that match the same # instance_id/device_id are ignored. port_data2 = copy.copy(self.port_data2) # set device_id on the ports to be the same. port_data2[1]['device_id'] = port_data2[0]['device_id'] network_model = model.Network(id='network_id', bridge='br-int', injected='injected', label='fake_network', tenant_id='fake_tenant') network_cache = {'info_cache': { 'network_info': [{'id': 'network_id', 'address': 'mac_address', 'network': network_model, 'type': 'ovs', 'ovs_interfaceid': 'ovs_interfaceid', 'devname': 'devname'}]}} self._fake_get_instance_nw_info_helper(network_cache, port_data2, None, None) def test_get_instance_nw_info_ignores_neutron_ports_empty_cache(self): # Tests that ports returned from neutron that match the same # instance_id/device_id are ignored when the instance info cache is # empty. port_data2 = copy.copy(self.port_data2) # set device_id on the ports to be the same. port_data2[1]['device_id'] = port_data2[0]['device_id'] network_cache = {'info_cache': {'network_info': []}} self._fake_get_instance_nw_info_helper(network_cache, port_data2, None, None) def _fake_get_instance_nw_info_helper(self, network_cache, current_neutron_ports, networks=None, port_ids=None): """Helper function to test get_instance_nw_info. :param network_cache - data already in the nova network cache. :param current_neutron_ports - updated list of ports from neutron. :param networks - networks of ports being added to instance. :param port_ids - new ports being added to instance. """ # keep a copy of the original ports/networks to pass to # get_instance_nw_info() as the code below changes them. 
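        # NOTE: the local port_ids/networks names are extended further down while
        # the mox expectations are built; the untouched copies taken here are what
        # actually get passed to get_instance_nw_info() at the end of the helper.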
original_port_ids = copy.copy(port_ids) original_networks = copy.copy(networks) api = neutronapi.API() self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update( mox.IgnoreArg(), self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache) neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn( self.moxed_client) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': current_neutron_ports}) ifaces = network_cache['info_cache']['network_info'] if port_ids is None: port_ids = [iface['id'] for iface in ifaces] net_ids = [iface['network']['id'] for iface in ifaces] nets = [{'id': iface['network']['id'], 'name': iface['network']['label'], 'tenant_id': iface['network']['meta']['tenant_id']} for iface in ifaces] if networks is None: if ifaces: self.moxed_client.list_networks( id=net_ids).AndReturn({'networks': nets}) else: non_shared_nets = [ {'id': iface['network']['id'], 'name': iface['network']['label'], 'tenant_id': iface['network']['meta']['tenant_id']} for iface in ifaces if not iface['shared']] shared_nets = [ {'id': iface['network']['id'], 'name': iface['network']['label'], 'tenant_id': iface['network']['meta']['tenant_id']} for iface in ifaces if iface['shared']] self.moxed_client.list_networks( shared=False, tenant_id=self.instance['project_id'] ).AndReturn({'networks': non_shared_nets}) self.moxed_client.list_networks( shared=True).AndReturn({'networks': shared_nets}) else: networks = networks + [ dict(id=iface['network']['id'], name=iface['network']['label'], tenant_id=iface['network']['meta']['tenant_id']) for iface in ifaces] port_ids = [iface['id'] for iface in ifaces] + port_ids index = 0 current_neutron_port_map = {} for current_neutron_port in current_neutron_ports: current_neutron_port_map[current_neutron_port['id']] = ( current_neutron_port) for port_id in port_ids: current_neutron_port = current_neutron_port_map.get(port_id) if current_neutron_port: for ip in current_neutron_port['fixed_ips']: self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=current_neutron_port['id']).AndReturn( {'floatingips': [self.float_data2[index]]}) self.moxed_client.list_subnets( id=mox.SameElementsAs([ip['subnet_id']]) ).AndReturn( {'subnets': [self.subnet_data_n[index]]}) self.moxed_client.list_ports( network_id=current_neutron_port['network_id'], device_owner='network:dhcp').AndReturn( {'ports': self.dhcp_port_data1}) index += 1 self.instance['info_cache'] = self._fake_instance_info_cache( network_cache['info_cache']['network_info'], self.instance['uuid']) self.mox.StubOutWithMock(api.db, 'instance_info_cache_get') api.db.instance_info_cache_get( mox.IgnoreArg(), self.instance['uuid']).MultipleTimes().AndReturn( self.instance['info_cache']) self.mox.ReplayAll() instance = self._fake_instance_object_with_info_cache(self.instance) nw_infs = api.get_instance_nw_info(self.context, instance, networks=original_networks, port_ids=original_port_ids) self.assertEqual(index, len(nw_infs)) # ensure that nic ordering is preserved for iface_index in range(index): self.assertEqual(port_ids[iface_index], nw_infs[iface_index]['id']) def test_get_instance_nw_info_without_subnet(self): # Test get instance_nw_info for a port without subnet. 
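        # NOTE: self.port_data3 deliberately carries an empty 'fixed_ips' list, so
        # the resulting network info should expose no fixed IPs and no subnets,
        # which is what the assertions at the end of this test verify.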
api = neutronapi.API() self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update( mox.IgnoreArg(), self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': self.port_data3}) self.moxed_client.list_networks( id=[self.port_data1[0]['network_id']]).AndReturn( {'networks': self.nets1}) neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn( self.moxed_client) net_info_cache = [] for port in self.port_data3: net_info_cache.append({"network": {"id": port['network_id']}, "id": port['id']}) self.instance['info_cache'] = self._fake_instance_info_cache( net_info_cache, self.instance['uuid']) self.mox.StubOutWithMock(api.db, 'instance_info_cache_get') api.db.instance_info_cache_get( mox.IgnoreArg(), self.instance['uuid']).AndReturn(self.instance['info_cache']) self.mox.ReplayAll() instance = self._fake_instance_object_with_info_cache(self.instance) nw_inf = api.get_instance_nw_info(self.context, instance) id_suffix = 3 self.assertEqual(0, len(nw_inf.fixed_ips())) self.assertEqual('my_netname1', nw_inf[0]['network']['label']) self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id']) self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address']) self.assertEqual(0, len(nw_inf[0]['network']['subnets'])) def test_refresh_neutron_extensions_cache(self): api = neutronapi.API() # Note: Don't want the default get_client from setUp() self.mox.ResetAll() neutronapi.get_client(mox.IgnoreArg()).AndReturn( self.moxed_client) self.moxed_client.list_extensions().AndReturn( {'extensions': [{'name': constants.QOS_QUEUE}]}) self.mox.ReplayAll() api._refresh_neutron_extensions_cache(mox.IgnoreArg()) self.assertEqual( {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}}, api.extensions) def test_populate_neutron_extension_values_rxtx_factor(self): api = neutronapi.API() # Note: Don't want the default get_client from setUp() self.mox.ResetAll() neutronapi.get_client(mox.IgnoreArg()).AndReturn( self.moxed_client) self.moxed_client.list_extensions().AndReturn( {'extensions': [{'name': constants.QOS_QUEUE}]}) self.mox.ReplayAll() flavor = flavors.get_default_flavor() flavor['rxtx_factor'] = 1 instance = objects.Instance(system_metadata={}) instance.flavor = flavor port_req_body = {'port': {}} api._populate_neutron_extension_values(self.context, instance, None, port_req_body) self.assertEqual(1, port_req_body['port']['rxtx_factor']) def test_allocate_for_instance_1(self): # Allocate one port in one network env. self._allocate_for_instance(1) def test_allocate_for_instance_2(self): # Allocate one port in two networks env. api = self._stub_allocate_for_instance(net_idx=2) self.assertRaises(exception.NetworkAmbiguous, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_accepts_macs_kwargs_None(self): # The macs kwarg should be accepted as None. self._allocate_for_instance(1, macs=None) def test_allocate_for_instance_accepts_macs_kwargs_set(self): # The macs kwarg should be accepted, as a set, the # _allocate_for_instance helper checks that the mac is used to create a # port. 
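        # NOTE: _stub_allocate_for_instance pops an address from the macs set into
        # port_req_body['port']['mac_address'] for each port it creates, so a
        # one-element set here is enough to verify the requested MAC is applied.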
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45'])) def test_allocate_for_instance_accepts_only_portid(self): # Make sure allocate_for_instance works when only a portid is provided self._returned_nw_info = self.port_data1 result = self._allocate_for_instance( requested_networks=objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')])) self.assertEqual(self.port_data1, result) @mock.patch('nova.network.neutronv2.api.API._unbind_ports') def test_allocate_for_instance_not_enough_macs_via_ports(self, mock_unbind): # using a hypervisor MAC via a pre-created port will stop it being # used to dynamically create a port on a network. We put the network # first in requested_networks so that if the code were to not pre-check # requested ports, it would incorrectly assign the mac and not fail. requested_networks = objects.NetworkRequestList( objects = [ objects.NetworkRequest(network_id=self.nets2[1]['id']), objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( net_idx=2, requested_networks=requested_networks, macs=set(['my_mac1']), _break='mac' + self.nets2[1]['id']) self.assertRaises(exception.PortNotFree, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks, macs=set(['my_mac1'])) mock_unbind.assert_called_once_with(self.context, [], self.moxed_client, mock.ANY) @mock.patch('nova.network.neutronv2.api.API._unbind_ports') def test_allocate_for_instance_not_enough_macs(self, mock_unbind): # If not enough MAC addresses are available to allocate to networks, an # error should be raised. # We could pass in macs=set(), but that wouldn't tell us that # allocate_for_instance tracks used macs properly, so we pass in one # mac, and ask for two networks. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']), objects.NetworkRequest(network_id=self.nets2[0]['id'])]) api = self._stub_allocate_for_instance( net_idx=2, requested_networks=requested_networks, macs=set(['my_mac2']), _break='mac' + self.nets2[0]['id']) with mock.patch.object(api, '_delete_ports'): self.assertRaises(exception.PortNotFree, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks, macs=set(['my_mac2'])) mock_unbind.assert_called_once_with(self.context, [], self.moxed_client, mock.ANY) def test_allocate_for_instance_two_macs_two_networks(self): # If two MACs are available and two networks requested, two new ports # get made and no exceptions raised. 
requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']), objects.NetworkRequest(network_id=self.nets2[0]['id'])]) self._allocate_for_instance( net_idx=2, requested_networks=requested_networks, macs=set(['my_mac2', 'my_mac1'])) def test_allocate_for_instance_mac_conflicting_requested_port(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( net_idx=1, requested_networks=requested_networks, macs=set(['unknown:mac']), _break='pre_list_networks') self.assertRaises(exception.PortNotUsable, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks, macs=set(['unknown:mac'])) def test_allocate_for_instance_without_requested_networks(self): api = self._stub_allocate_for_instance(net_idx=3) self.assertRaises(exception.NetworkAmbiguous, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_requested_non_available_network(self): """verify that a non available network is ignored. self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1] Do not create a port on a non available network self.nets3[2]. """ requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets3[0], self.nets3[2], self.nets3[1])]) self._allocate_for_instance(net_idx=2, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets3[1], self.nets3[0], self.nets3[2])]) self._allocate_for_instance(net_idx=3, requested_networks=requested_networks) def test_allocate_for_instance_with_no_subnet_defined(self): # net_id=4 does not specify subnet and does not set the option # port_security_disabled to True, so Neutron will not been # able to associate the default security group to the port # requested to be created. We expect an exception to be # raised. 
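        # NOTE: net_idx=4 selects self.nets4, whose single network is the only
        # fixture defined without a 'subnets' entry, which is what drives the
        # failure asserted below.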
self.assertRaises(exception.SecurityGroupCannotBeApplied, self._allocate_for_instance, net_idx=4, _break='post_list_networks') def test_allocate_for_instance_with_invalid_network_id(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='invalid_id')]) api = self._stub_allocate_for_instance(net_idx=9, requested_networks=requested_networks, _break='post_list_networks') self.assertRaises(exception.NetworkNotFound, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_fixedip(self): # specify only first and last network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'], address='10.0.1.0')]) self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_port(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) def test_allocate_for_instance_no_networks(self): """verify the exception thrown when there are no networks defined.""" self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.moxed_client.list_extensions().AndReturn({'extensions': []}) self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': model.NetworkInfo([])}) self.moxed_client.list_networks(shared=True).AndReturn( {'networks': model.NetworkInfo([])}) self.mox.ReplayAll() nwinfo = api.allocate_for_instance(self.context, self.instance) self.assertEqual(0, len(nwinfo)) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') def test_allocate_for_instance_ex1(self, mock_unbind, mock_preexisting): """verify we will delete created ports if we fail to allocate all net resources. Mox to raise exception when creating a second port. In this case, the code should delete the first created port. 
""" self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) mock_preexisting.return_value = [] api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(False) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets2[0], self.nets2[1])]) self.moxed_client.list_networks( id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) index = 0 for network in self.nets2: binding_port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } port_req_body = { 'port': { 'network_id': network['id'], 'admin_state_up': True, 'tenant_id': self.instance.project_id, }, } port_req_body['port'].update(binding_port_req_body['port']) port = {'id': 'portid_' + network['id']} api._populate_neutron_extension_values(self.context, self.instance, None, binding_port_req_body, network=network, neutron=self.moxed_client, bind_host_id=None).AndReturn(None) if index == 0: self.moxed_client.create_port( MyComparator(port_req_body)).AndReturn({'port': port}) else: NeutronOverQuota = exceptions.OverQuotaClient() self.moxed_client.create_port( MyComparator(port_req_body)).AndRaise(NeutronOverQuota) index += 1 self.moxed_client.delete_port('portid_' + self.nets2[0]['id']) self.mox.ReplayAll() self.assertRaises(exception.PortLimitExceeded, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) mock_unbind.assert_called_once_with(self.context, [], self.moxed_client, mock.ANY) def test_allocate_for_instance_ex2(self): """verify we have no port to delete if we fail to allocate the first net resource. Mox to raise exception when creating the first port. In this case, the code should not delete any ports. 
""" self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), neutron=self.moxed_client, refresh_cache=True).AndReturn(False) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets2[0], self.nets2[1])]) self.moxed_client.list_networks( id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) binding_port_req_body = { 'port': { 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } port_req_body = { 'port': { 'network_id': self.nets2[0]['id'], 'admin_state_up': True, 'device_id': self.instance.uuid, 'tenant_id': self.instance.project_id, }, } api._populate_neutron_extension_values(self.context, self.instance, None, binding_port_req_body, network=self.nets2[0], neutron=self.moxed_client, bind_host_id=None).AndReturn(None) self.moxed_client.create_port( MyComparator(port_req_body)).AndRaise( Exception("fail to create port")) self.mox.ReplayAll() self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_no_port_or_network(self): class BailOutEarly(Exception): pass self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() self.moxed_client.list_extensions().AndReturn({'extensions': []}) self.mox.StubOutWithMock(api, '_get_available_networks') # Make sure we get an empty list and then bail out of the rest # of the function api._get_available_networks(self.context, self.instance.project_id, [], neutron=self.moxed_client).\ AndRaise(BailOutEarly) self.mox.ReplayAll() requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest()]) self.assertRaises(BailOutEarly, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_second_time(self): # Make sure that allocate_for_instance only returns ports that it # allocated during _that_ run. new_port = {'id': 'fake'} self._returned_nw_info = self.port_data1 + [new_port] nw_info = self._allocate_for_instance() self.assertEqual([new_port], nw_info) def test_allocate_for_instance_port_in_use(self): # If a port is already in use, an exception should be raised. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks', _device=True) self.assertRaises(exception.PortInUse, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_port_not_found(self): # If a port is not found, an exception should be raised. 
requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='invalid_id')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks') self.assertRaises(exception.PortNotFound, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_port_invalid_tenantid(self): self.tenant_id = 'invalid_id' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, _break='pre_list_networks') self.assertRaises(exception.PortNotUsable, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) def test_allocate_for_instance_with_externalnet_forbidden(self): """Only one network is available, it's external, and the client is unauthorized to use it. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) self.moxed_client.list_extensions().AndReturn({'extensions': []}) # no networks in the tenant self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': model.NetworkInfo([])}) # external network is shared self.moxed_client.list_networks(shared=True).AndReturn( {'networks': self.nets8}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.ExternalNetworkAttachForbidden, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_externalnet_multiple(self): """Multiple networks are available, one the client is authorized to use, and an external one the client is unauthorized to use. """ self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) self.moxed_client.list_extensions().AndReturn({'extensions': []}) # network found in the tenant self.moxed_client.list_networks( tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': self.nets1}) # external network is shared self.moxed_client.list_networks(shared=True).AndReturn( {'networks': self.nets8}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises( exception.NetworkAmbiguous, api.allocate_for_instance, self.context, self.instance) def test_allocate_for_instance_with_externalnet_admin_ctx(self): """Only one network is available, it's external, and the client is authorized. """ admin_ctx = context.RequestContext('userid', 'my_tenantid', is_admin=True) api = self._stub_allocate_for_instance(net_idx=8) api.allocate_for_instance(admin_ctx, self.instance) def test_allocate_for_instance_with_external_shared_net(self): """Only one network is available, it's external and shared.""" ctx = context.RequestContext('userid', 'my_tenantid') api = self._stub_allocate_for_instance(net_idx=10) api.allocate_for_instance(ctx, self.instance) def _deallocate_for_instance(self, number, requested_networks=None): # TODO(mriedem): Remove this conversion when all neutronv2 APIs are # converted to handling instance objects. 
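# NOTE: illustrative sketch, not part of the nova source tree. The helper
# whose body follows mocks the deallocate flow: list the instance's ports,
# delete them, and reset the instance's network info cache to '[]'. A
# simplified outline of that flow, with the function name assumed for
# illustration:
def deallocate_instance_ports(client, instance_uuid, preexisting_port_ids=()):
    """Delete the nova-created ports of an instance (sketch only).

    Ports the user supplied (pre-existing ports) are skipped here; the
    real code unbinds them rather than deleting them.
    """
    ports = client.list_ports(device_id=instance_uuid)['ports']
    for port in ports:
        if port['id'] in preexisting_port_ids:
            continue
        client.delete_port(port['id'])
    # The caller then writes an empty network info cache ('[]') for the
    # instance, which is what the mocked instance_info_cache_update call
    # in the helper below asserts.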
self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) api = neutronapi.API() port_data = number == 1 and self.port_data1 or self.port_data2 ports = {port['id'] for port in port_data} ret_data = copy.deepcopy(port_data) if requested_networks: if isinstance(requested_networks, objects.NetworkRequestList): # NOTE(danms): Temporary and transitional with mock.patch('nova.utils.is_neutron', return_value=True): requested_networks = requested_networks.as_tuples() for net, fip, port, request_id in requested_networks: ret_data.append({'network_id': net, 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', 'id': port, 'status': 'DOWN', 'admin_state_up': True, 'fixed_ips': [], 'mac_address': 'fake_mac', }) self.moxed_client.list_ports( device_id=self.instance.uuid).AndReturn( {'ports': ret_data}) self.moxed_client.list_extensions().AndReturn({'extensions': []}) if requested_networks: for net, fip, port, request_id in requested_networks: self.moxed_client.update_port(port) for port in ports: self.moxed_client.delete_port(port).InAnyOrder("delete_port_group") self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update(self.context, self.instance.uuid, {'network_info': '[]'}).AndReturn( fake_info_cache) self.mox.ReplayAll() api = neutronapi.API() api.deallocate_for_instance(self.context, self.instance, requested_networks=requested_networks) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_1_with_requested(self, mock_preexisting): mock_preexisting.return_value = [] requested = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='fake-net', address='1.2.3.4', port_id='fake-port')]) # Test to deallocate in one port env. self._deallocate_for_instance(1, requested_networks=requested) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_2_with_requested(self, mock_preexisting): mock_preexisting.return_value = [] requested = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='fake-net', address='1.2.3.4', port_id='fake-port')]) # Test to deallocate in one port env. self._deallocate_for_instance(2, requested_networks=requested) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_1(self, mock_preexisting): mock_preexisting.return_value = [] # Test to deallocate in one port env. self._deallocate_for_instance(1) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_2(self, mock_preexisting): mock_preexisting.return_value = [] # Test to deallocate in two ports env. self._deallocate_for_instance(2) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_for_instance_port_not_found(self, mock_preexisting): # TODO(mriedem): Remove this conversion when all neutronv2 APIs are # converted to handling instance objects. 
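# NOTE: illustrative sketch, not part of the nova source tree. The test whose
# body follows verifies that a 404 from delete_port is not fatal during
# deallocation (the port is simply already gone). That tolerant-delete
# pattern in isolation, using the neutronclient exception class the test
# itself constructs:
from neutronclient.common import exceptions as neutron_exc


def delete_port_ignore_missing(client, port_id):
    """Delete a port, treating 'already deleted' as success (sketch only)."""
    try:
        client.delete_port(port_id)
    except neutron_exc.NeutronClientException as exc:
        if getattr(exc, 'status_code', None) != 404:
            raise
        # 404: the port no longer exists, so there is nothing to do.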
self.instance = fake_instance.fake_instance_obj(self.context, **self.instance) mock_preexisting.return_value = [] port_data = self.port_data1 self.moxed_client.list_ports( device_id=self.instance.uuid).AndReturn( {'ports': port_data}) self.moxed_client.list_extensions().AndReturn({'extensions': []}) NeutronNotFound = exceptions.NeutronClientException(status_code=404) for port in reversed(port_data): self.moxed_client.delete_port(port['id']).AndRaise( NeutronNotFound) self.mox.ReplayAll() api = neutronapi.API() api.deallocate_for_instance(self.context, self.instance) def _test_deallocate_port_for_instance(self, number): port_data = number == 1 and self.port_data1 or self.port_data2 nets = number == 1 and self.nets1 or self.nets2 self.moxed_client.delete_port(port_data[0]['id']) net_info_cache = [] for port in port_data: net_info_cache.append({"network": {"id": port['network_id']}, "id": port['id']}) self.instance['info_cache'] = self._fake_instance_info_cache( net_info_cache, self.instance['uuid']) api = neutronapi.API() neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) self.moxed_client.list_ports( tenant_id=self.instance['project_id'], device_id=self.instance['uuid']).AndReturn( {'ports': port_data[1:]}) neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) net_ids = [port['network_id'] for port in port_data] self.moxed_client.list_networks(id=net_ids).AndReturn( {'networks': nets}) float_data = number == 1 and self.float_data1 or self.float_data2 for data in port_data[1:]: for ip in data['fixed_ips']: self.moxed_client.list_floatingips( fixed_ip_address=ip['ip_address'], port_id=data['id']).AndReturn( {'floatingips': float_data[1:]}) for port in port_data[1:]: self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({}) self.mox.StubOutWithMock(api.db, 'instance_info_cache_get') api.db.instance_info_cache_get(mox.IgnoreArg(), self.instance['uuid']).AndReturn( self.instance['info_cache']) self.mox.ReplayAll() instance = self._fake_instance_object_with_info_cache(self.instance) nwinfo = api.deallocate_port_for_instance(self.context, instance, port_data[0]['id']) self.assertEqual(len(port_data[1:]), len(nwinfo)) if len(port_data) > 1: self.assertEqual('my_netid2', nwinfo[0]['network']['id']) def test_deallocate_port_for_instance_1(self): # Test to deallocate the first and only port self._test_deallocate_port_for_instance(1) def test_deallocate_port_for_instance_2(self): # Test to deallocate the first port of two self._test_deallocate_port_for_instance(2) def test_list_ports(self): search_opts = {'parm': 'value'} self.moxed_client.list_ports(**search_opts) self.mox.ReplayAll() neutronapi.API().list_ports(self.context, **search_opts) def test_show_port(self): self.moxed_client.show_port('foo').AndReturn( {'port': self.port_data1[0]}) self.mox.ReplayAll() neutronapi.API().show_port(self.context, 'foo') def test_validate_networks(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None)] ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': []}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_without_port_quota_on_network_side(self): requested_networks = 
[('my_netid1', None, None, None), ('my_netid2', None, None, None)] ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {}}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_ex_1(self): requested_networks = [('my_netid1', None, None, None)] self.moxed_client.list_networks( id=mox.SameElementsAs(['my_netid1'])).AndReturn( {'networks': self.nets1}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': []}) self.mox.ReplayAll() api = neutronapi.API() try: api.validate_networks(self.context, requested_networks, 1) except exception.NetworkNotFound as ex: self.assertIn("my_netid2", six.text_type(ex)) def test_validate_networks_ex_2(self): requested_networks = [('my_netid1', None, None, None), ('my_netid2', None, None, None), ('my_netid3', None, None, None)] ids = ['my_netid1', 'my_netid2', 'my_netid3'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.mox.ReplayAll() api = neutronapi.API() try: api.validate_networks(self.context, requested_networks, 1) except exception.NetworkNotFound as ex: self.assertIn("my_netid2", six.text_type(ex)) self.assertIn("my_netid3", six.text_type(ex)) def test_validate_networks_duplicate_enable(self): # Verify that no duplicateNetworks exception is thrown when duplicate # network ids are passed to validate_networks. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid1')]) ids = ['my_netid1', 'my_netid1'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 50}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': []}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_allocate_for_instance_with_requested_networks_duplicates(self): # specify a duplicate network to allocate to instance requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=net['id']) for net in (self.nets6[0], self.nets6[1])]) self._allocate_for_instance(net_idx=6, requested_networks=requested_networks) def test_allocate_for_instance_requested_networks_duplicates_port(self): # specify first port and last port that are in same network requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port['id']) for port in (self.port_data1[0], self.port_data3[0])]) self._allocate_for_instance(net_idx=6, requested_networks=requested_networks) def test_allocate_for_instance_requested_networks_duplicates_combo(self): # specify a combo net_idx=7 : net2, port in net1, net2, port in net1 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid2'), objects.NetworkRequest(port_id=self.port_data1[0]['id']), objects.NetworkRequest(network_id='my_netid2'), objects.NetworkRequest(port_id=self.port_data3[0]['id'])]) self._allocate_for_instance(net_idx=7, requested_networks=requested_networks) def test_validate_networks_not_specified(self): requested_networks = 
objects.NetworkRequestList(objects=[]) self.moxed_client.list_networks( tenant_id=self.context.project_id, shared=False).AndReturn( {'networks': self.nets1}) self.moxed_client.list_networks( shared=True).AndReturn( {'networks': self.nets2}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.NetworkAmbiguous, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_port_not_found(self): # Verify that the correct exception is thrown when a non existent # port is passed to validate_networks. requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest( network_id='my_netid1', port_id='3123-ad34-bc43-32332ca33e')]) PortNotFound = exceptions.PortNotFoundClient() self.moxed_client.show_port(requested_networks[0].port_id).AndRaise( PortNotFound) self.mox.ReplayAll() # Expected call from setUp. neutronapi.get_client(None) api = neutronapi.API() self.assertRaises(exception.PortNotFound, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_port_show_raises_non404(self): # Verify that the correct exception is thrown when a non existent # port is passed to validate_networks. fake_port_id = '3123-ad34-bc43-32332ca33e' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest( network_id='my_netid1', port_id=fake_port_id)]) NeutronNotFound = exceptions.NeutronClientException(status_code=0) self.moxed_client.show_port(requested_networks[0].port_id).AndRaise( NeutronNotFound) self.mox.ReplayAll() # Expected call from setUp. neutronapi.get_client(None) api = neutronapi.API() exc = self.assertRaises(exception.NovaException, api.validate_networks, self.context, requested_networks, 1) expected_exception_message = ('Failed to access port %(port_id)s: ' 'An unknown exception occurred.' % {'port_id': fake_port_id}) self.assertEqual(expected_exception_message, str(exc)) def test_validate_networks_port_in_use(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])]) self.moxed_client.show_port(self.port_data3[0]['id']).\ AndReturn({'port': self.port_data3[0]}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.PortInUse, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_port_no_subnet_id(self): port_a = self.port_data3[0] port_a['device_id'] = None port_a['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.PortRequiresFixedIP, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_no_subnet_id(self): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='his_netid4')]) ids = ['his_netid4'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets4}) self.mox.ReplayAll() api = neutronapi.API() self.assertRaises(exception.NetworkRequiresSubnet, api.validate_networks, self.context, requested_networks, 1) def test_validate_networks_ports_in_same_network_enable(self): # Verify that duplicateNetworks exception is not thrown when ports # on same duplicate network are passed to validate_networks. 
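# NOTE: illustrative sketch, not part of the nova source tree. The
# validate_networks tests above exercise three per-port pre-flight checks:
# the port must exist, must not already be attached to a device, and must
# have a fixed IP. A condensed version of those checks, reusing the
# exception classes the tests assert on (imports shown for completeness,
# assuming the standard nova/neutronclient modules):
from neutronclient.common import exceptions as neutron_exc

from nova import exception


def check_requested_port(client, port_id):
    """Minimal pre-flight checks for a user-supplied port (sketch only)."""
    try:
        port = client.show_port(port_id)['port']
    except neutron_exc.PortNotFoundClient:
        raise exception.PortNotFound(port_id=port_id)
    if port.get('device_id'):
        raise exception.PortInUse(port_id=port_id)
    if not port.get('fixed_ips'):
        raise exception.PortRequiresFixedIP(port_id=port_id)
    return port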
port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', 'subnet_id': 'subnet_id'} port_b = self.port_data1[0] self.assertEqual(port_a['network_id'], port_b['network_id']) for port in [port_a, port_b]: port['device_id'] = None port['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id']), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn( {'port': port_a}) self.moxed_client.show_port(port_b['id']).AndReturn( {'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_ports_not_in_same_network(self): port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', 'subnet_id': 'subnet_id'} port_b = self.port_data2[1] self.assertNotEqual(port_a['network_id'], port_b['network_id']) for port in [port_a, port_b]: port['device_id'] = None port['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id']), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a}) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() api.validate_networks(self.context, requested_networks, 1) def test_validate_networks_no_quota(self): # Test validation for a request for one instance needing # two ports, where the quota is 2 and 2 ports are in use # => instances which can be created = 0 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid2')]) ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 2}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': self.port_data2}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(0, max_count) def test_validate_networks_with_ports_and_networks(self): # Test validation for a request for one instance needing # one port allocated via nova with another port being passed in. port_b = self.port_data2[1] port_b['device_id'] = None port_b['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) ids = ['my_netid1'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets1}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 5}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': self.port_data2}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(1, max_count) def test_validate_networks_one_port_and_no_networks(self): # Test that show quota is not called if no networks are # passed in and only ports. 
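# NOTE: illustrative sketch, not part of the nova source tree. The quota
# tests above and below encode a simple calculation: how many of the
# requested instances can still be created given the tenant's port quota,
# the ports already in use, and the ports each instance needs, where a
# quota of -1 means unlimited. The arithmetic those tests expect:
def max_instances_for_port_quota(quota, ports_in_use, ports_per_instance,
                                 num_requested):
    """Return how many of num_requested instances fit in the port quota.

    Sketch of the expectations encoded in the surrounding tests:
      quota=2,  in_use=2, per_instance=2, requested=1 -> 0
      quota=5,  in_use=2, per_instance=2, requested=2 -> 1
      quota=-1 (unlimited)                            -> num_requested
    """
    if quota == -1 or ports_per_instance == 0:
        return num_requested
    free_ports = max(quota - ports_in_use, 0)
    return min(num_requested, free_ports // ports_per_instance)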
port_b = self.port_data2[1] port_b['device_id'] = None port_b['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(1, max_count) def test_validate_networks_some_quota(self): # Test validation for a request for two instance needing # two ports each, where the quota is 5 and 2 ports are in use # => instances which can be created = 1 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid2')]) ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': 5}}) self.moxed_client.list_ports( tenant_id='my_tenantid', fields=['id']).AndReturn( {'ports': self.port_data2}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 2) self.assertEqual(1, max_count) def test_validate_networks_unlimited_quota(self): # Test validation for a request for two instance needing # two ports each, where the quota is -1 (unlimited) # => instances which can be created = 1 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id='my_netid1'), objects.NetworkRequest(network_id='my_netid2')]) ids = ['my_netid1', 'my_netid2'] self.moxed_client.list_networks( id=mox.SameElementsAs(ids)).AndReturn( {'networks': self.nets2}) self.moxed_client.show_quota( tenant_id='my_tenantid').AndReturn( {'quota': {'port': -1}}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 2) self.assertEqual(2, max_count) def test_validate_networks_no_quota_but_ports_supplied(self): port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', 'subnet_id': 'subnet_id'} port_b = self.port_data2[1] self.assertNotEqual(port_a['network_id'], port_b['network_id']) for port in [port_a, port_b]: port['device_id'] = None port['device_owner'] = None requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=port_a['id']), objects.NetworkRequest(port_id=port_b['id'])]) self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a}) self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) self.mox.ReplayAll() api = neutronapi.API() max_count = api.validate_networks(self.context, requested_networks, 1) self.assertEqual(1, max_count) def _mock_list_ports(self, port_data=None): if port_data is None: port_data = self.port_data2 address = self.port_address self.moxed_client.list_ports( fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn( {'ports': port_data}) self.mox.ReplayAll() return address def test_get_fixed_ip_by_address_fails_for_no_ports(self): address = self._mock_list_ports(port_data=[]) api = neutronapi.API() self.assertRaises(exception.FixedIpNotFoundForAddress, api.get_fixed_ip_by_address, self.context, address) def test_get_fixed_ip_by_address_succeeds_for_1_port(self): address = self._mock_list_ports(port_data=self.port_data1) api = neutronapi.API() result = api.get_fixed_ip_by_address(self.context, address) self.assertEqual(self.instance2['uuid'], result['instance_uuid']) def 
test_get_fixed_ip_by_address_fails_for_more_than_1_port(self): address = self._mock_list_ports() api = neutronapi.API() self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances, api.get_fixed_ip_by_address, self.context, address) def _get_available_networks(self, prv_nets, pub_nets, req_ids=None, context=None): api = neutronapi.API() nets = prv_nets + pub_nets if req_ids: mox_list_params = {'id': req_ids} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) else: mox_list_params = {'tenant_id': self.instance['project_id'], 'shared': False} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': prv_nets}) mox_list_params = {'shared': True} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': pub_nets}) self.mox.ReplayAll() rets = api._get_available_networks( context if context else self.context, self.instance['project_id'], req_ids) self.assertEqual(nets, rets) def test_get_available_networks_all_private(self): self._get_available_networks(prv_nets=self.nets2, pub_nets=[]) def test_get_available_networks_all_public(self): self._get_available_networks(prv_nets=[], pub_nets=self.nets2) def test_get_available_networks_private_and_public(self): self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4) def test_get_available_networks_with_network_ids(self): prv_nets = [self.nets3[0]] pub_nets = [self.nets3[-1]] # specify only first and last network req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])] self._get_available_networks(prv_nets, pub_nets, req_ids) def test_get_available_networks_with_custom_policy(self): rules = {'network:attach_external_network': ''} policy.set_rules(oslo_policy.Rules.from_dict(rules)) req_ids = [net['id'] for net in self.nets5] self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids) def test_get_floating_ip_pools(self): api = neutronapi.API() search_opts = {'router:external': True} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]}) self.mox.ReplayAll() pools = api.get_floating_ip_pools(self.context) expected = [self.fip_pool['name'], self.fip_pool_nova['name']] self.assertEqual(expected, pools) def _get_expected_fip_model(self, fip_data, idx=0): expected = {'id': fip_data['id'], 'address': fip_data['floating_ip_address'], 'pool': self.fip_pool['name'], 'project_id': fip_data['tenant_id'], 'fixed_ip_id': fip_data['port_id'], 'fixed_ip': {'address': fip_data['fixed_ip_address']}, 'instance': ({'uuid': self.port_data2[idx]['device_id']} if fip_data['port_id'] else None)} if expected['instance'] is not None: expected['fixed_ip']['instance_uuid'] = \ expected['instance']['uuid'] return expected def _test_get_floating_ip(self, fip_data, idx=0, by_address=False): api = neutronapi.API() fip_id = fip_data['id'] net_id = fip_data['floating_network_id'] address = fip_data['floating_ip_address'] if by_address: self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [fip_data]}) else: self.moxed_client.show_floatingip(fip_id).\ AndReturn({'floatingip': fip_data}) self.moxed_client.show_network(net_id).\ AndReturn({'network': self.fip_pool}) if fip_data['port_id']: self.moxed_client.show_port(fip_data['port_id']).\ AndReturn({'port': self.port_data2[idx]}) self.mox.ReplayAll() expected = self._get_expected_fip_model(fip_data, idx) if by_address: fip = api.get_floating_ip_by_address(self.context, address) else: fip = api.get_floating_ip(self.context, fip_id) 
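# NOTE: illustrative sketch, not part of the nova source tree.
# _get_expected_fip_model above spells out how a Neutron floatingip record
# is translated into the dict these tests expect back from the API. The same
# translation written as a standalone function, with the pool name and the
# associated port passed in rather than looked up:
def translate_floating_ip(fip, pool_name, port=None):
    """Map a Neutron floatingip dict to the nova-style dict (sketch only)."""
    result = {
        'id': fip['id'],
        'address': fip['floating_ip_address'],
        'pool': pool_name,
        'project_id': fip['tenant_id'],
        'fixed_ip_id': fip['port_id'],
        'fixed_ip': {'address': fip['fixed_ip_address']},
        'instance': ({'uuid': port['device_id']}
                     if fip['port_id'] and port else None),
    }
    if result['instance'] is not None:
        result['fixed_ip']['instance_uuid'] = result['instance']['uuid']
    return result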
self.assertEqual(expected, fip) def test_get_floating_ip_unassociated(self): self._test_get_floating_ip(self.fip_unassociated, idx=0) def test_get_floating_ip_associated(self): self._test_get_floating_ip(self.fip_associated, idx=1) def test_get_floating_ip_by_address(self): self._test_get_floating_ip(self.fip_unassociated, idx=0, by_address=True) def test_get_floating_ip_by_address_associated(self): self._test_get_floating_ip(self.fip_associated, idx=1, by_address=True) def test_get_floating_ip_by_address_not_found(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': []}) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpNotFoundForAddress, api.get_floating_ip_by_address, self.context, address) def test_get_floating_ip_by_id_not_found(self): api = neutronapi.API() NeutronNotFound = exceptions.NeutronClientException(status_code=404) floating_ip_id = self.fip_unassociated['id'] self.moxed_client.show_floatingip(floating_ip_id).\ AndRaise(NeutronNotFound) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpNotFound, api.get_floating_ip, self.context, floating_ip_id) def test_get_floating_ip_raises_non404(self): api = neutronapi.API() NeutronNotFound = exceptions.NeutronClientException(status_code=0) floating_ip_id = self.fip_unassociated['id'] self.moxed_client.show_floatingip(floating_ip_id).\ AndRaise(NeutronNotFound) self.mox.ReplayAll() self.assertRaises(exceptions.NeutronClientException, api.get_floating_ip, self.context, floating_ip_id) def test_get_floating_ip_by_address_multiple_found(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated] * 2}) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpMultipleFoundForAddress, api.get_floating_ip_by_address, self.context, address) def test_get_floating_ips_by_project(self): api = neutronapi.API() project_id = self.context.project_id self.moxed_client.list_floatingips(tenant_id=project_id).\ AndReturn({'floatingips': [self.fip_unassociated, self.fip_associated]}) search_opts = {'router:external': True} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]}) self.moxed_client.list_ports(tenant_id=project_id).\ AndReturn({'ports': self.port_data2}) self.mox.ReplayAll() expected = [self._get_expected_fip_model(self.fip_unassociated), self._get_expected_fip_model(self.fip_associated, idx=1)] fips = api.get_floating_ips_by_project(self.context) self.assertEqual(expected, fips) def _test_get_instance_id_by_floating_address(self, fip_data, associated=False): api = neutronapi.API() address = fip_data['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [fip_data]}) if associated: self.moxed_client.show_port(fip_data['port_id']).\ AndReturn({'port': self.port_data2[1]}) self.mox.ReplayAll() if associated: expected = self.port_data2[1]['device_id'] else: expected = None fip = api.get_instance_id_by_floating_address(self.context, address) self.assertEqual(expected, fip) def test_get_instance_id_by_floating_address(self): self._test_get_instance_id_by_floating_address(self.fip_unassociated) def test_get_instance_id_by_floating_address_associated(self): self._test_get_instance_id_by_floating_address(self.fip_associated, associated=True) def 
test_allocate_floating_ip(self): api = neutronapi.API() pool_name = self.fip_pool['name'] pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndReturn({'floatingip': self.fip_unassociated}) self.mox.ReplayAll() fip = api.allocate_floating_ip(self.context, 'ext_net') self.assertEqual(self.fip_unassociated['floating_ip_address'], fip) def test_allocate_floating_ip_addr_gen_fail(self): api = neutronapi.API() pool_name = self.fip_pool['name'] pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndRaise(exceptions.IpAddressGenerationFailureClient) self.mox.ReplayAll() self.assertRaises(exception.NoMoreFloatingIps, api.allocate_floating_ip, self.context, 'ext_net') def test_allocate_floating_ip_exhausted_fail(self): api = neutronapi.API() pool_name = self.fip_pool['name'] pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndRaise(exceptions.ExternalIpAddressExhaustedClient) self.mox.ReplayAll() self.assertRaises(exception.NoMoreFloatingIps, api.allocate_floating_ip, self.context, 'ext_net') def test_allocate_floating_ip_with_pool_id(self): api = neutronapi.API() pool_id = self.fip_pool['id'] search_opts = {'router:external': True, 'fields': 'id', 'id': pool_id} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndReturn({'floatingip': self.fip_unassociated}) self.mox.ReplayAll() fip = api.allocate_floating_ip(self.context, pool_id) self.assertEqual(self.fip_unassociated['floating_ip_address'], fip) def test_allocate_floating_ip_with_default_pool(self): api = neutronapi.API() pool_name = self.fip_pool_nova['name'] pool_id = self.fip_pool_nova['id'] search_opts = {'router:external': True, 'fields': 'id', 'name': pool_name} self.moxed_client.list_networks(**search_opts).\ AndReturn({'networks': [self.fip_pool_nova]}) self.moxed_client.create_floatingip( {'floatingip': {'floating_network_id': pool_id}}).\ AndReturn({'floatingip': self.fip_unassociated}) self.mox.ReplayAll() fip = api.allocate_floating_ip(self.context) self.assertEqual(self.fip_unassociated['floating_ip_address'], fip) def test_release_floating_ip(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] fip_id = self.fip_unassociated['id'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated]}) self.moxed_client.delete_floatingip(fip_id) self.mox.ReplayAll() api.release_floating_ip(self.context, address) def test_disassociate_and_release_floating_ip(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] fip_id = self.fip_unassociated['id'] floating_ip = {'address': address} self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated]}) 
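# NOTE: illustrative sketch, not part of the nova source tree. The
# allocate_floating_ip tests above show the two-step flow -- resolve the
# external pool network, then create a floating IP on it -- and how Neutron's
# two address-exhaustion errors both surface as NoMoreFloatingIps. A
# compressed outline of that flow for a pool referenced by name (a pool id
# works the same way with 'id' in place of 'name'):
from neutronclient.common import exceptions as neutron_exc

from nova import exception


def allocate_floating_ip_from_pool(client, pool_name):
    """Allocate a floating IP from a named external network (sketch only)."""
    nets = client.list_networks(**{'router:external': True,
                                   'fields': 'id',
                                   'name': pool_name})['networks']
    if not nets:
        raise exception.FloatingIpPoolNotFound()
    pool_id = nets[0]['id']
    try:
        fip = client.create_floatingip(
            {'floatingip': {'floating_network_id': pool_id}})
    except (neutron_exc.IpAddressGenerationFailureClient,
            neutron_exc.ExternalIpAddressExhaustedClient):
        # Both exhaustion errors are reported to the caller the same way.
        raise exception.NoMoreFloatingIps()
    return fip['floatingip']['floating_ip_address']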
self.moxed_client.delete_floatingip(fip_id) self.mox.ReplayAll() api.disassociate_and_release_floating_ip(self.context, None, floating_ip) def test_disassociate_and_release_floating_ip_with_instance(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] fip_id = self.fip_unassociated['id'] floating_ip = {'address': address} instance = self._fake_instance_object(self.instance) self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated]}) self.moxed_client.delete_floatingip(fip_id) self._setup_mock_for_refresh_cache(api, [instance]) self.mox.ReplayAll() api.disassociate_and_release_floating_ip(self.context, instance, floating_ip) def test_release_floating_ip_associated(self): api = neutronapi.API() address = self.fip_associated['floating_ip_address'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_associated]}) self.mox.ReplayAll() self.assertRaises(exception.FloatingIpAssociated, api.release_floating_ip, self.context, address) def _setup_mock_for_refresh_cache(self, api, instances): nw_info = model.NetworkInfo() self.mox.StubOutWithMock(api, '_get_instance_nw_info') self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') for instance in instances: api._get_instance_nw_info(mox.IgnoreArg(), instance).\ AndReturn(nw_info) api.db.instance_info_cache_update(mox.IgnoreArg(), instance['uuid'], mox.IgnoreArg()).AndReturn( fake_info_cache) def test_associate_floating_ip(self): api = neutronapi.API() address = self.fip_unassociated['floating_ip_address'] fixed_address = self.port_address2 fip_id = self.fip_unassociated['id'] instance = self._fake_instance_object(self.instance) search_opts = {'device_owner': 'compute:nova', 'device_id': instance.uuid} self.moxed_client.list_ports(**search_opts).\ AndReturn({'ports': [self.port_data2[1]]}) self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_unassociated]}) self.moxed_client.update_floatingip( fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'], 'fixed_ip_address': fixed_address}}) self._setup_mock_for_refresh_cache(api, [instance]) self.mox.ReplayAll() api.associate_floating_ip(self.context, instance, address, fixed_address) @mock.patch('nova.objects.Instance.get_by_uuid') def test_reassociate_floating_ip(self, mock_get): api = neutronapi.API() address = self.fip_associated['floating_ip_address'] new_fixed_address = self.port_address fip_id = self.fip_associated['id'] search_opts = {'device_owner': 'compute:nova', 'device_id': self.instance2['uuid']} self.moxed_client.list_ports(**search_opts).\ AndReturn({'ports': [self.port_data2[0]]}) self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_associated]}) self.moxed_client.update_floatingip( fip_id, {'floatingip': {'port_id': 'my_portid1', 'fixed_ip_address': new_fixed_address}}) self.moxed_client.show_port(self.fip_associated['port_id']).\ AndReturn({'port': self.port_data2[1]}) mock_get.return_value = fake_instance.fake_instance_obj( self.context, **self.instance) instance2 = self._fake_instance_object(self.instance2) self._setup_mock_for_refresh_cache(api, [mock_get.return_value, instance2]) self.mox.ReplayAll() api.associate_floating_ip(self.context, instance2, address, new_fixed_address) def test_associate_floating_ip_not_found_fixed_ip(self): instance = self._fake_instance_object(self.instance) api = neutronapi.API() address = 
self.fip_associated['floating_ip_address'] fixed_address = self.fip_associated['fixed_ip_address'] search_opts = {'device_owner': 'compute:nova', 'device_id': self.instance['uuid']} self.moxed_client.list_ports(**search_opts).\ AndReturn({'ports': [self.port_data2[0]]}) self.mox.ReplayAll() self.assertRaises(exception.FixedIpNotFoundForAddress, api.associate_floating_ip, self.context, instance, address, fixed_address) def test_disassociate_floating_ip(self): instance = self._fake_instance_object(self.instance) api = neutronapi.API() address = self.fip_associated['floating_ip_address'] fip_id = self.fip_associated['id'] self.moxed_client.list_floatingips(floating_ip_address=address).\ AndReturn({'floatingips': [self.fip_associated]}) self.moxed_client.update_floatingip( fip_id, {'floatingip': {'port_id': None}}) self._setup_mock_for_refresh_cache(api, [instance]) self.mox.ReplayAll() api.disassociate_floating_ip(self.context, instance, address) def test_add_fixed_ip_to_instance(self): instance = self._fake_instance_object(self.instance) api = neutronapi.API() self._setup_mock_for_refresh_cache(api, [instance]) network_id = 'my_netid1' search_opts = {'network_id': network_id} self.moxed_client.list_subnets( **search_opts).AndReturn({'subnets': self.subnet_data_n}) search_opts = {'device_id': instance.uuid, 'device_owner': 'compute:nova', 'network_id': network_id} self.moxed_client.list_ports( **search_opts).AndReturn({'ports': self.port_data1}) port_req_body = { 'port': { 'fixed_ips': [{'subnet_id': 'my_subid1'}, {'subnet_id': 'my_subid1'}], }, } port = self.port_data1[0] port['fixed_ips'] = [{'subnet_id': 'my_subid1'}] self.moxed_client.update_port('my_portid1', MyComparator(port_req_body)).AndReturn({'port': port}) self.mox.ReplayAll() api.add_fixed_ip_to_instance(self.context, instance, network_id) def test_remove_fixed_ip_from_instance(self): instance = self._fake_instance_object(self.instance) api = neutronapi.API() self._setup_mock_for_refresh_cache(api, [instance]) address = '10.0.0.3' zone = 'compute:%s' % self.instance['availability_zone'] search_opts = {'device_id': self.instance['uuid'], 'device_owner': zone, 'fixed_ips': 'ip_address=%s' % address} self.moxed_client.list_ports( **search_opts).AndReturn({'ports': self.port_data1}) port_req_body = { 'port': { 'fixed_ips': [], }, } port = self.port_data1[0] port['fixed_ips'] = [] self.moxed_client.update_port('my_portid1', MyComparator(port_req_body)).AndReturn({'port': port}) self.mox.ReplayAll() api.remove_fixed_ip_from_instance(self.context, instance, address) def test_list_floating_ips_without_l3_support(self): api = neutronapi.API() NeutronNotFound = exceptions.NotFound() self.moxed_client.list_floatingips( fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound) self.mox.ReplayAll() neutronapi.get_client('fake') floatingips = api._get_floating_ips_by_fixed_and_port( self.moxed_client, '1.1.1.1', 1) self.assertEqual([], floatingips) def test_nw_info_get_ips(self): fake_port = { 'fixed_ips': [ {'ip_address': '1.1.1.1'}], 'id': 'port-id', } api = neutronapi.API() self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port') api._get_floating_ips_by_fixed_and_port( self.moxed_client, '1.1.1.1', 'port-id').AndReturn( [{'floating_ip_address': '10.0.0.1'}]) self.mox.ReplayAll() neutronapi.get_client('fake') result = api._nw_info_get_ips(self.moxed_client, fake_port) self.assertEqual(1, len(result)) self.assertEqual('1.1.1.1', result[0]['address']) self.assertEqual('10.0.0.1', result[0]['floating_ips'][0]['address']) def 
test_nw_info_get_subnets(self): fake_port = { 'fixed_ips': [ {'ip_address': '1.1.1.1'}, {'ip_address': '2.2.2.2'}], 'id': 'port-id', } fake_subnet = model.Subnet(cidr='1.0.0.0/8') fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']] api = neutronapi.API() self.mox.StubOutWithMock(api, '_get_subnets_from_port') api._get_subnets_from_port(self.context, fake_port).AndReturn( [fake_subnet]) self.mox.ReplayAll() neutronapi.get_client('fake') subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips) self.assertEqual(1, len(subnets)) self.assertEqual(1, len(subnets[0]['ips'])) self.assertEqual('1.1.1.1', subnets[0]['ips'][0]['address']) def _test_nw_info_build_network(self, vif_type): fake_port = { 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'id': 'port-id', 'network_id': 'net-id', 'binding:vif_type': vif_type, } fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant', 'mtu': 9000}] api = neutronapi.API() self.mox.ReplayAll() neutronapi.get_client('fake') net, iid = api._nw_info_build_network(fake_port, fake_nets, fake_subnets) self.assertEqual(fake_subnets, net['subnets']) self.assertEqual('net-id', net['id']) self.assertEqual('foo', net['label']) self.assertEqual('tenant', net.get_meta('tenant_id')) self.assertEqual(9000, net.get_meta('mtu')) self.assertEqual(CONF.flat_injected, net.get_meta('injected')) return net, iid def test_nw_info_build_network_ovs(self): net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS) self.assertEqual(CONF.neutron.ovs_bridge, net['bridge']) self.assertNotIn('should_create_bridge', net) self.assertEqual('port-id', iid) def test_nw_info_build_network_dvs(self): net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS) self.assertEqual('net-id', net['bridge']) self.assertNotIn('should_create_bridge', net) self.assertNotIn('ovs_interfaceid', net) self.assertIsNone(iid) def test_nw_info_build_network_bridge(self): net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE) self.assertEqual('brqnet-id', net['bridge']) self.assertTrue(net['should_create_bridge']) self.assertIsNone(iid) def test_nw_info_build_network_tap(self): net, iid = self._test_nw_info_build_network(model.VIF_TYPE_TAP) self.assertIsNone(net['bridge']) self.assertNotIn('should_create_bridge', net) self.assertIsNone(iid) def test_nw_info_build_network_other(self): net, iid = self._test_nw_info_build_network(None) self.assertIsNone(net['bridge']) self.assertNotIn('should_create_bridge', net) self.assertIsNone(iid) def test_nw_info_build_no_match(self): fake_port = { 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'id': 'port-id', 'network_id': 'net-id1', 'tenant_id': 'tenant', 'binding:vif_type': model.VIF_TYPE_OVS, } fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}] api = neutronapi.API() self.mox.ReplayAll() neutronapi.get_client('fake') net, iid = api._nw_info_build_network(fake_port, fake_nets, fake_subnets) self.assertEqual(fake_subnets, net['subnets']) self.assertEqual('net-id1', net['id']) self.assertEqual('tenant', net['meta']['tenant_id']) def test_nw_info_build_network_vhostuser(self): fake_port = { 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'id': 'port-id', 'network_id': 'net-id', 'binding:vif_type': model.VIF_TYPE_VHOSTUSER, 'binding:vif_details': { model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True } } fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}] api = 
neutronapi.API() self.mox.ReplayAll() neutronapi.get_client('fake') net, iid = api._nw_info_build_network(fake_port, fake_nets, fake_subnets) self.assertEqual(fake_subnets, net['subnets']) self.assertEqual('net-id', net['id']) self.assertEqual('foo', net['label']) self.assertEqual('tenant', net.get_meta('tenant_id')) self.assertEqual(CONF.flat_injected, net.get_meta('injected')) self.assertEqual(CONF.neutron.ovs_bridge, net['bridge']) self.assertNotIn('should_create_bridge', net) self.assertEqual('port-id', iid) def _test_nw_info_build_custom_bridge(self, vif_type, extra_details=None): fake_port = { 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'id': 'port-id', 'network_id': 'net-id', 'binding:vif_type': vif_type, 'binding:vif_details': { model.VIF_DETAILS_BRIDGE_NAME: 'custom-bridge', } } if extra_details: fake_port['binding:vif_details'].update(extra_details) fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}] api = neutronapi.API() self.mox.ReplayAll() neutronapi.get_client('fake') net, iid = api._nw_info_build_network(fake_port, fake_nets, fake_subnets) self.assertNotEqual(CONF.neutron.ovs_bridge, net['bridge']) self.assertEqual('custom-bridge', net['bridge']) def test_nw_info_build_custom_ovs_bridge(self): self._test_nw_info_build_custom_bridge(model.VIF_TYPE_OVS) def test_nw_info_build_custom_ovs_bridge_vhostuser(self): self._test_nw_info_build_custom_bridge(model.VIF_TYPE_VHOSTUSER, {model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True}) def test_nw_info_build_custom_lb_bridge(self): self._test_nw_info_build_custom_bridge(model.VIF_TYPE_BRIDGE) def test_build_network_info_model(self): api = neutronapi.API() fake_inst = objects.Instance() fake_inst.project_id = 'fake' fake_inst.uuid = 'uuid' fake_inst.info_cache = objects.InstanceInfoCache() fake_inst.info_cache.network_info = model.NetworkInfo() fake_ports = [ # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port1', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:01', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': {}, }, # admin_state_up=False and status='DOWN' thus vif.active=True {'id': 'port2', 'network_id': 'net-id', 'admin_state_up': False, 'status': 'DOWN', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:02', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': {}, }, # admin_state_up=True and status='DOWN' thus vif.active=False {'id': 'port0', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'DOWN', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:03', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': {}, }, # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port3', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:04', 'binding:vif_type': model.VIF_TYPE_HW_VEB, 'binding:vnic_type': model.VNIC_TYPE_DIRECT, 'binding:profile': {'pci_vendor_info': '1137:0047', 'pci_slot': '0000:0a:00.1', 'physical_network': 'phynet1'}, 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'}, }, # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port4', 'network_id': 'net-id', 'admin_state_up': True, 'status': 
'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:05', 'binding:vif_type': model.VIF_TYPE_802_QBH, 'binding:vnic_type': model.VNIC_TYPE_MACVTAP, 'binding:profile': {'pci_vendor_info': '1137:0047', 'pci_slot': '0000:0a:00.2', 'physical_network': 'phynet1'}, 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'}, }, # admin_state_up=True and status='ACTIVE' thus vif.active=True # This port has no binding:vnic_type to verify default is assumed {'id': 'port5', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:06', 'binding:vif_type': model.VIF_TYPE_BRIDGE, # No binding:vnic_type 'binding:vif_details': {}, }, # This does not match the networks we provide below, # so it should be ignored (and is here to verify that) {'id': 'port6', 'network_id': 'other-net-id', 'admin_state_up': True, 'status': 'DOWN', 'binding:vnic_type': model.VNIC_TYPE_NORMAL, }, ] fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] fake_nets = [ {'id': 'net-id', 'name': 'foo', 'tenant_id': 'fake', } ] neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes( ).AndReturn(self.moxed_client) self.moxed_client.list_ports( tenant_id='fake', device_id='uuid').AndReturn( {'ports': fake_ports}) self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port') self.mox.StubOutWithMock(api, '_get_subnets_from_port') requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1], fake_ports[3], fake_ports[4], fake_ports[5]] for requested_port in requested_ports: api._get_floating_ips_by_fixed_and_port( self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn( [{'floating_ip_address': '10.0.0.1'}]) for requested_port in requested_ports: api._get_subnets_from_port(self.context, requested_port ).AndReturn(fake_subnets) self.mox.StubOutWithMock(api, '_get_preexisting_port_ids') api._get_preexisting_port_ids(fake_inst).AndReturn(['port5']) self.mox.ReplayAll() neutronapi.get_client('fake') fake_inst.info_cache = objects.InstanceInfoCache.new( self.context, 'fake-uuid') fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([]) nw_infos = api._build_network_info_model( self.context, fake_inst, fake_nets, [fake_ports[2]['id'], fake_ports[0]['id'], fake_ports[1]['id'], fake_ports[3]['id'], fake_ports[4]['id'], fake_ports[5]['id']], preexisting_port_ids=['port3']) self.assertEqual(6, len(nw_infos)) index = 0 for nw_info in nw_infos: self.assertEqual(requested_ports[index]['mac_address'], nw_info['address']) self.assertEqual('tapport' + str(index), nw_info['devname']) self.assertIsNone(nw_info['ovs_interfaceid']) self.assertEqual(requested_ports[index]['binding:vif_type'], nw_info['type']) if nw_info['type'] == model.VIF_TYPE_BRIDGE: self.assertEqual('brqnet-id', nw_info['network']['bridge']) self.assertEqual(requested_ports[index].get('binding:vnic_type', model.VNIC_TYPE_NORMAL), nw_info['vnic_type']) self.assertEqual(requested_ports[index].get('binding:vif_details'), nw_info.get('details')) self.assertEqual(requested_ports[index].get('binding:profile'), nw_info.get('profile')) index += 1 self.assertFalse(nw_infos[0]['active']) self.assertTrue(nw_infos[1]['active']) self.assertTrue(nw_infos[2]['active']) self.assertTrue(nw_infos[3]['active']) self.assertTrue(nw_infos[4]['active']) self.assertTrue(nw_infos[5]['active']) self.assertEqual('port0', nw_infos[0]['id']) self.assertEqual('port1', nw_infos[1]['id']) self.assertEqual('port2', nw_infos[2]['id']) self.assertEqual('port3', nw_infos[3]['id']) 
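# NOTE: illustrative sketch, not part of the nova source tree. The port
# comments and assertions in this test pin down two derived VIF attributes:
# a VIF is inactive only when the port is DOWN while admin_state_up is True,
# and preserve_on_delete is set for ports the user brought along
# (pre-existing ports). Both rules in one place:
def vif_flags(port, preexisting_port_ids):
    """Return (active, preserve_on_delete) for a Neutron port (sketch only).

    Matches the table in the comments above:
      admin_state_up=True,  status=ACTIVE -> active
      admin_state_up=False, status=DOWN   -> active
      admin_state_up=True,  status=DOWN   -> not active
    """
    active = not (port.get('admin_state_up') and port.get('status') == 'DOWN')
    preserve_on_delete = port['id'] in preexisting_port_ids
    return active, preserve_on_delete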
self.assertEqual('port4', nw_infos[4]['id']) self.assertEqual('port5', nw_infos[5]['id']) self.assertFalse(nw_infos[0]['preserve_on_delete']) self.assertFalse(nw_infos[1]['preserve_on_delete']) self.assertFalse(nw_infos[2]['preserve_on_delete']) self.assertTrue(nw_infos[3]['preserve_on_delete']) self.assertFalse(nw_infos[4]['preserve_on_delete']) self.assertTrue(nw_infos[5]['preserve_on_delete']) @mock.patch('nova.network.neutronv2.api.API._nw_info_get_subnets') @mock.patch('nova.network.neutronv2.api.API._nw_info_get_ips') @mock.patch('nova.network.neutronv2.api.API._nw_info_build_network') @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.API._gather_port_ids_and_networks') def test_build_network_info_model_empty( self, mock_gather_port_ids_and_networks, mock_get_preexisting_port_ids, mock_nw_info_build_network, mock_nw_info_get_ips, mock_nw_info_get_subnets): # An empty instance info network cache should not be populated from # ports found in Neutron. api = neutronapi.API() fake_inst = objects.Instance() fake_inst.project_id = 'fake' fake_inst.uuid = 'uuid' fake_inst.info_cache = objects.InstanceInfoCache() fake_inst.info_cache.network_info = model.NetworkInfo() fake_ports = [ # admin_state_up=True and status='ACTIVE' thus vif.active=True {'id': 'port1', 'network_id': 'net-id', 'admin_state_up': True, 'status': 'ACTIVE', 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': 'de:ad:be:ef:00:01', 'binding:vif_type': model.VIF_TYPE_BRIDGE, 'binding:vnic_type': model.VNIC_TYPE_NORMAL, 'binding:vif_details': {}, }, ] fake_subnets = [model.Subnet(cidr='1.0.0.0/8')] neutronapi.get_client(mox.IgnoreArg(), admin=True).MultipleTimes( ).AndReturn(self.moxed_client) self.moxed_client.list_ports( tenant_id='fake', device_id='uuid').AndReturn( {'ports': fake_ports}) mock_gather_port_ids_and_networks.return_value = ([], []) mock_get_preexisting_port_ids.return_value = [] mock_nw_info_build_network.return_value = (None, None) mock_nw_info_get_ips.return_value = [] mock_nw_info_get_subnets.return_value = fake_subnets self.mox.ReplayAll() neutronapi.get_client('fake') nw_infos = api._build_network_info_model( self.context, fake_inst) self.assertEqual(0, len(nw_infos)) def test_get_subnets_from_port(self): api = neutronapi.API() port_data = copy.copy(self.port_data1[0]) subnet_data1 = copy.copy(self.subnet_data1) subnet_data1[0]['host_routes'] = [ {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'} ] self.moxed_client.list_subnets( id=[port_data['fixed_ips'][0]['subnet_id']] ).AndReturn({'subnets': subnet_data1}) self.moxed_client.list_ports( network_id=subnet_data1[0]['network_id'], device_owner='network:dhcp').AndReturn({'ports': []}) self.mox.ReplayAll() subnets = api._get_subnets_from_port(self.context, port_data) self.assertEqual(1, len(subnets)) self.assertEqual(1, len(subnets[0]['routes'])) self.assertEqual(subnet_data1[0]['host_routes'][0]['destination'], subnets[0]['routes'][0]['cidr']) self.assertEqual(subnet_data1[0]['host_routes'][0]['nexthop'], subnets[0]['routes'][0]['gateway']['address']) def test_get_all_empty_list_networks(self): api = neutronapi.API() self.moxed_client.list_networks().AndReturn({'networks': []}) self.mox.ReplayAll() networks = api.get_all(self.context) self.assertIsInstance(networks, objects.NetworkList) self.assertEqual(0, len(networks)) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_get_port_vnic_info_1(self, mock_get_client): api = neutronapi.API() 
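# NOTE: illustrative sketch, not part of the nova source tree. The test
# starting above (and continuing below) checks that the vnic type and the
# physical network of a port are read with two narrow Neutron calls:
# show_port restricted to 'binding:vnic_type' and 'network_id', then
# show_network restricted to 'provider:physical_network', with
# VNIC_TYPE_NORMAL assumed when the port carries no vnic type. The lookup in
# isolation:
from nova.network import model


def get_port_vnic_info_sketch(client, port_id):
    """Return (vnic_type, physical_network) for a port (sketch only)."""
    port = client.show_port(
        port_id, fields=['binding:vnic_type', 'network_id'])['port']
    vnic_type = port.get('binding:vnic_type', model.VNIC_TYPE_NORMAL)
    net = client.show_network(
        port['network_id'], fields='provider:physical_network')['network']
    return vnic_type, net.get('provider:physical_network')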
self.mox.ResetAll() test_port = { 'port': {'id': 'my_port_id1', 'network_id': 'net-id', 'binding:vnic_type': model.VNIC_TYPE_DIRECT, }, } test_net = {'network': {'provider:physical_network': 'phynet1'}} mock_client = mock_get_client() mock_client.show_port.return_value = test_port mock_client.show_network.return_value = test_net vnic_type, phynet_name = api._get_port_vnic_info( self.context, mock_client, test_port['port']['id']) mock_client.show_port.assert_called_once_with(test_port['port']['id'], fields=['binding:vnic_type', 'network_id']) mock_client.show_network.assert_called_once_with( test_port['port']['network_id'], fields='provider:physical_network') self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type) self.assertEqual('phynet1', phynet_name) def _test_get_port_vnic_info(self, mock_get_client, binding_vnic_type=None): api = neutronapi.API() self.mox.ResetAll() test_port = { 'port': {'id': 'my_port_id2', 'network_id': 'net-id', }, } if binding_vnic_type: test_port['port']['binding:vnic_type'] = binding_vnic_type mock_get_client.reset_mock() mock_client = mock_get_client() mock_client.show_port.return_value = test_port vnic_type, phynet_name = api._get_port_vnic_info( self.context, mock_client, test_port['port']['id']) mock_client.show_port.assert_called_once_with(test_port['port']['id'], fields=['binding:vnic_type', 'network_id']) self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type) self.assertFalse(phynet_name) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_get_port_vnic_info_2(self, mock_get_client): self._test_get_port_vnic_info(mock_get_client, binding_vnic_type=model.VNIC_TYPE_NORMAL) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_get_port_vnic_info_3(self, mock_get_client): self._test_get_port_vnic_info(mock_get_client) @mock.patch.object(neutronapi.API, "_get_port_vnic_info") @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_create_pci_requests_for_sriov_ports(self, mock_get_client, mock_get_port_vnic_info): api = neutronapi.API() self.mox.ResetAll() requested_networks = objects.NetworkRequestList( objects = [ objects.NetworkRequest(port_id='my_portid1'), objects.NetworkRequest(network_id='net1'), objects.NetworkRequest(port_id='my_portid2'), objects.NetworkRequest(port_id='my_portid3'), objects.NetworkRequest(port_id='my_portid4')]) pci_requests = objects.InstancePCIRequests(requests=[]) mock_get_port_vnic_info.side_effect = [ (model.VNIC_TYPE_DIRECT, 'phynet1'), (model.VNIC_TYPE_NORMAL, ''), (model.VNIC_TYPE_MACVTAP, 'phynet1'), (model.VNIC_TYPE_MACVTAP, 'phynet2') ] api.create_pci_requests_for_sriov_ports( None, pci_requests, requested_networks) self.assertEqual(3, len(pci_requests.requests)) has_pci_request_id = [net.pci_request_id is not None for net in requested_networks.objects] expected_results = [True, False, False, True, True] self.assertEqual(expected_results, has_pci_request_id) class TestNeutronv2WithMock(test.TestCase): """Used to test Neutron V2 API with mock.""" def setUp(self): super(TestNeutronv2WithMock, self).setUp() self.api = neutronapi.API() self.context = context.RequestContext( 'fake-user', 'fake-project', auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936') @mock.patch('oslo_concurrency.lockutils.lock') def test_get_instance_nw_info_locks_per_instance(self, mock_lock): instance = objects.Instance(uuid=uuid.uuid4()) api = neutronapi.API() mock_lock.side_effect = test.TestingException self.assertRaises(test.TestingException, api.get_instance_nw_info, 'context', 
instance) mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid) @mock.patch('nova.network.neutronv2.api.LOG') def test_get_instance_nw_info_verify_duplicates_ignored(self, mock_log): """test that the returned networks & port_ids from _gather_port_ids_and_networks doesn't contain any duplicates The test fakes an instance with two ports connected to two networks. The _gather_port_ids_and_networks method will be called with the instance and a list of port ids of which one port id is configured already to the instance (== duplicate #1) and a list of networks that already contains a network to which an instance port is connected (== duplicate #2). All-in-all, we expect the resulting port ids list to contain 3 items (["instance_port_1", "port_1", "port_2"]) and the resulting networks list to contain 3 items (["net_1", "net_2", "instance_network_1"]) while the warning message for duplicate items was executed twice (due to "duplicate #1" & "duplicate #2") """ networks = [model.Network(id="net_1"), model.Network(id="net_2")] port_ids = ["port_1", "port_2"] instance_networks = [{"id": "instance_network_1", "name": "fake_network", "tenant_id": "fake_tenant_id"}] instance_port_ids = ["instance_port_1"] network_info = model.NetworkInfo( [{'id': port_ids[0], 'network': networks[0]}, {'id': instance_port_ids[0], 'network': model.Network( id=instance_networks[0]["id"], label=instance_networks[0]["name"], meta={"tenant_id": instance_networks[0]["tenant_id"]})}] ) instance_uuid = uuid.uuid4() instance = objects.Instance(uuid=instance_uuid, info_cache=objects.InstanceInfoCache( context=self.context, instance_uuid=instance_uuid, network_info=network_info)) new_networks, new_port_ids = self.api._gather_port_ids_and_networks( self.context, instance, networks, port_ids) self.assertEqual(new_networks, networks + instance_networks) self.assertEqual(new_port_ids, instance_port_ids + port_ids) self.assertEqual(2, mock_log.warning.call_count) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(neutronapi.API, '_get_instance_nw_info') @mock.patch('nova.network.base_api.update_instance_cache_with_nw_info') def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock): fake_result = mock.sentinel.get_nw_info_result mock_get.return_value = fake_result instance = fake_instance.fake_instance_obj(self.context) result = self.api.get_instance_nw_info(self.context, instance) mock_get.assert_called_once_with(self.context, instance) mock_update.assert_called_once_with(self.api, self.context, instance, nw_info=fake_result, update_cells=False) self.assertEqual(fake_result, result) def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks, ids, list_port_values): def _fake_list_ports(**search_opts): for args, return_value in list_port_values: if args == search_opts: return return_value self.fail('Unexpected call to list_ports %s' % search_opts) with test.nested( mock.patch.object(client.Client, 'list_ports', side_effect=_fake_list_ports), mock.patch.object(client.Client, 'list_networks', return_value={'networks': nets}), mock.patch.object(client.Client, 'show_quota', return_value={'quota': {'port': 50}})) as ( list_ports_mock, list_networks_mock, show_quota_mock): self.api.validate_networks(self.context, requested_networks, 1) self.assertEqual(len(list_port_values), len(list_ports_mock.call_args_list)) list_networks_mock.assert_called_once_with(id=ids) show_quota_mock.assert_called_once_with(tenant_id='fake-project') def test_validate_networks_over_limit_quota(self): """Test 
validates that a relevant exception is being raised when there are more ports defined, than there is a quota for it. """ requested_networks = [('my_netid1', '10.0.1.2', None, None), ('my_netid2', '10.0.1.3', None, None)] list_port_values = [({'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'}, {'ports': []}), ({'network_id': 'my_netid2', 'fixed_ips': 'ip_address=10.0.1.3', 'fields': 'device_id'}, {'ports': []}), ({'tenant_id': 'fake-project', 'fields': ['id']}, {'ports': [1, 2, 3, 4, 5]})] nets = [{'subnets': '1'}, {'subnets': '2'}] def _fake_list_ports(**search_opts): for args, return_value in list_port_values: if args == search_opts: return return_value with test.nested( mock.patch.object(self.api, '_get_available_networks', return_value=nets), mock.patch.object(client.Client, 'list_ports', side_effect=_fake_list_ports), mock.patch.object(client.Client, 'show_quota', return_value={'quota': {'port': 1}})): exc = self.assertRaises(exception.PortLimitExceeded, self.api.validate_networks, self.context, requested_networks, 1) expected_exception_msg = ('The number of defined ports: ' '%(ports)d is over the limit: ' '%(quota)d' % {'ports': 5, 'quota': 1}) self.assertEqual(expected_exception_msg, str(exc)) def test_validate_networks_fixed_ip_no_dup1(self): # Test validation for a request for a network with a # fixed ip that is not already in use because no fixed ips in use nets1 = [{'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': 'fake-project'}] requested_networks = [('my_netid1', '10.0.1.2', None, None)] ids = ['my_netid1'] list_port_values = [({'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'}, {'ports': []}), ({'tenant_id': 'fake-project', 'fields': ['id']}, {'ports': []})] self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks, ids, list_port_values) def test_validate_networks_fixed_ip_no_dup2(self): # Test validation for a request for a network with a # fixed ip that is not already in use because not used on this net id nets2 = [{'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': 'fake-project'}, {'id': 'my_netid2', 'name': 'my_netname2', 'subnets': ['mysubnid2'], 'tenant_id': 'fake-project'}] requested_networks = [('my_netid1', '10.0.1.2', None, None), ('my_netid2', '10.0.1.3', None, None)] ids = ['my_netid1', 'my_netid2'] list_port_values = [({'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'}, {'ports': []}), ({'network_id': 'my_netid2', 'fixed_ips': 'ip_address=10.0.1.3', 'fields': 'device_id'}, {'ports': []}), ({'tenant_id': 'fake-project', 'fields': ['id']}, {'ports': []})] self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks, ids, list_port_values) def test_validate_networks_fixed_ip_dup(self): # Test validation for a request for a network with a # fixed ip that is already in use requested_networks = [('my_netid1', '10.0.1.2', None, None)] list_port_mock_params = {'network_id': 'my_netid1', 'fixed_ips': 'ip_address=10.0.1.2', 'fields': 'device_id'} list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]} with mock.patch.object(client.Client, 'list_ports', return_value=list_port_mock_return) as ( list_ports_mock): self.assertRaises(exception.FixedIpAlreadyInUse, self.api.validate_networks, self.context, requested_networks, 1) list_ports_mock.assert_called_once_with(**list_port_mock_params) def test_allocate_floating_ip_exceed_limit(self): # Verify that the correct exception 
is thrown when quota exceed pool_name = 'dummy' api = neutronapi.API() with test.nested( mock.patch.object(client.Client, 'create_floatingip'), mock.patch.object(api, '_get_floating_ip_pool_id_by_name_or_id')) as ( create_mock, get_mock): create_mock.side_effect = exceptions.OverQuotaClient() self.assertRaises(exception.FloatingIpLimitExceeded, api.allocate_floating_ip, self.context, pool_name) def test_allocate_floating_ip_no_ipv4_subnet(self): api = neutronapi.API() net_id = uuid.uuid4() error_msg = ('Bad floatingip request: Network %s does not contain ' 'any IPv4 subnet' % net_id) with test.nested( mock.patch.object(client.Client, 'create_floatingip'), mock.patch.object(api, '_get_floating_ip_pool_id_by_name_or_id')) as ( create_mock, get_mock): create_mock.side_effect = exceptions.BadRequest(error_msg) self.assertRaises(exception.FloatingIpBadRequest, api.allocate_floating_ip, self.context, 'ext_net') def test_create_port_for_instance_no_more_ip(self): instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} with mock.patch.object(client.Client, 'create_port', side_effect=exceptions.IpAddressGenerationFailureClient()) as ( create_port_mock): zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone}} self.assertRaises(exception.NoMoreFixedIps, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body) create_port_mock.assert_called_once_with(port_req_body) @mock.patch.object(client.Client, 'create_port', side_effect=exceptions.MacAddressInUseClient()) def test_create_port_for_instance_mac_address_in_use(self, create_port_mock): # Create fake data. instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone, 'mac_address': 'XX:XX:XX:XX:XX:XX'}} available_macs = set(['XX:XX:XX:XX:XX:XX']) # Run the code. self.assertRaises(exception.PortInUse, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body, available_macs=available_macs) # Assert the calls. create_port_mock.assert_called_once_with(port_req_body) @mock.patch.object(client.Client, 'create_port', side_effect=exceptions.IpAddressInUseClient()) def test_create_port_for_fixed_ip_in_use(self, create_port_mock): # Create fake data. instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone, 'mac_address': 'XX:XX:XX:XX:XX:XX'}} fake_ip = '1.1.1.1' # Run the code. self.assertRaises(exception.FixedIpAlreadyInUse, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body, fixed_ip=fake_ip) # Assert the calls. create_port_mock.assert_called_once_with(port_req_body) @mock.patch.object(client.Client, 'create_port', side_effect=exceptions.InvalidIpForNetworkClient()) def test_create_port_with_invalid_ip_for_network(self, create_port_mock): # Create fake data. 
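        # NOTE: this test and the ones around it all exercise the same
        # translation pattern in _create_port: a neutronclient exception
        # raised by create_port is mapped to a Nova exception before it
        # reaches the caller. A rough sketch of that mapping, inferred only
        # from the assertions in these tests:
        #
        #     try:
        #         port = client.create_port(port_req_body)
        #     except exceptions.IpAddressGenerationFailureClient:
        #         raise exception.NoMoreFixedIps(...)
        #     except exceptions.MacAddressInUseClient:
        #         raise exception.PortInUse(...)
        #     except exceptions.IpAddressInUseClient:
        #         raise exception.FixedIpAlreadyInUse(...)
        #     except exceptions.InvalidIpForNetworkClient:
        #         raise exception.InvalidInput(...)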
instance = fake_instance.fake_instance_obj(self.context) net = {'id': 'my_netid1', 'name': 'my_netname1', 'subnets': ['mysubnid1'], 'tenant_id': instance['project_id']} zone = 'compute:%s' % instance['availability_zone'] port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone, 'mac_address': 'XX:XX:XX:XX:XX:XX'}} fake_ip = '1.1.1.1' # Run the code. exc = self.assertRaises(exception.InvalidInput, self.api._create_port, neutronapi.get_client(self.context), instance, net['id'], port_req_body, fixed_ip=fake_ip) # Assert the exception message expected_exception_msg = ('Invalid input received: Fixed IP %(ip)s is ' 'not a valid ip address for network ' '%(net_id)s.' % {'ip': fake_ip, 'net_id': net['id']}) self.assertEqual(expected_exception_msg, str(exc)) # Assert the calls. create_port_mock.assert_called_once_with(port_req_body) def test_get_network_detail_not_found(self): api = neutronapi.API() expected_exc = exceptions.NetworkNotFoundClient() network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786' with mock.patch.object(client.Client, 'show_network', side_effect=expected_exc) as ( fake_show_network): self.assertRaises(exception.NetworkNotFound, api.get, self.context, network_uuid) fake_show_network.assert_called_once_with(network_uuid) @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.API.' '_refresh_neutron_extensions_cache') def test_deallocate_for_instance_uses_delete_helper(self, mock_refresh, mock_preexisting): # setup fake data instance = fake_instance.fake_instance_obj(self.context) mock_preexisting.return_value = [] port_data = {'ports': [{'id': str(uuid.uuid4())}]} ports = set([port['id'] for port in port_data.get('ports')]) api = neutronapi.API() # setup mocks mock_client = mock.Mock() mock_client.list_ports.return_value = port_data with test.nested( mock.patch.object(neutronapi, 'get_client', return_value=mock_client), mock.patch.object(api, '_delete_ports') ) as ( mock_get_client, mock_delete ): # run the code api.deallocate_for_instance(self.context, instance) # assert the calls mock_client.list_ports.assert_called_once_with( device_id=instance.uuid) mock_delete.assert_called_once_with( mock_client, instance, ports, raise_if_fail=True) def _test_delete_ports(self, expect_raise): results = [exceptions.NeutronClientException, None] mock_client = mock.Mock() with mock.patch.object(mock_client, 'delete_port', side_effect=results): api = neutronapi.API() api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'], raise_if_fail=expect_raise) def test_delete_ports_raise(self): self.assertRaises(exceptions.NeutronClientException, self._test_delete_ports, True) def test_delete_ports_no_raise(self): self._test_delete_ports(False) def test_delete_ports_never_raise_404(self): mock_client = mock.Mock() mock_client.delete_port.side_effect = exceptions.PortNotFoundClient api = neutronapi.API() api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'], raise_if_fail=True) mock_client.delete_port.assert_called_once_with('port1') @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') def test_deallocate_port_for_instance_fails(self, mock_preexisting): mock_preexisting.return_value = [] mock_client = mock.Mock() api = neutronapi.API() with test.nested( mock.patch.object(neutronapi, 'get_client', return_value=mock_client), mock.patch.object(api, '_delete_ports', side_effect=exceptions.Unauthorized), mock.patch.object(api, 'get_instance_nw_info') ) as ( get_client, delete_ports, get_nw_info ): 
self.assertRaises(exceptions.Unauthorized, api.deallocate_port_for_instance, self.context, instance={'uuid': 'fake'}, port_id='fake') # make sure that we didn't try to reload nw info self.assertFalse(get_nw_info.called) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def _test_show_port_exceptions(self, client_exc, expected_nova_exc, get_client_mock): show_port_mock = mock.Mock(side_effect=client_exc) get_client_mock.return_value.show_port = show_port_mock self.assertRaises(expected_nova_exc, self.api.show_port, self.context, 'fake_port_id') def test_show_port_not_found(self): self._test_show_port_exceptions(exceptions.PortNotFoundClient, exception.PortNotFound) def test_show_port_forbidden(self): self._test_show_port_exceptions(exceptions.Unauthorized, exception.Forbidden) def test_show_port_unknown_exception(self): self._test_show_port_exceptions(exceptions.NeutronClientException, exception.NovaException) def test_get_network(self): api = neutronapi.API() with mock.patch.object(client.Client, 'show_network') as mock_show: mock_show.return_value = { 'network': {'id': 'fake-uuid', 'name': 'fake-network'} } net_obj = api.get(self.context, 'fake-uuid') self.assertEqual('fake-network', net_obj.label) self.assertEqual('fake-network', net_obj.name) self.assertEqual('fake-uuid', net_obj.uuid) def test_get_all_networks(self): api = neutronapi.API() with mock.patch.object(client.Client, 'list_networks') as mock_list: mock_list.return_value = { 'networks': [ {'id': 'fake-uuid1', 'name': 'fake-network1'}, {'id': 'fake-uuid2', 'name': 'fake-network2'}, ]} net_objs = api.get_all(self.context) self.assertIsInstance(net_objs, objects.NetworkList) self.assertEqual(2, len(net_objs)) self.assertEqual(('fake-uuid1', 'fake-network1'), (net_objs[0].uuid, net_objs[0].name)) self.assertEqual(('fake-uuid2', 'fake-network2'), (net_objs[1].uuid, net_objs[1].name)) @mock.patch.object(neutronapi.API, "_refresh_neutron_extensions_cache") @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_update_instance_vnic_index(self, mock_get_client, mock_refresh_extensions): api = neutronapi.API() api.extensions = set([constants.VNIC_INDEX_EXT]) mock_client = mock_get_client() mock_client.update_port.return_value = 'port' instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0', 'uuid': str(uuid.uuid4()), 'display_name': 'test_instance', 'availability_zone': 'nova', 'host': 'some_host'} instance = objects.Instance(**instance) vif = {'id': 'fake-port-id'} api.update_instance_vnic_index(self.context, instance, vif, 7) port_req_body = {'port': {'vnic_index': 7}} mock_client.update_port.assert_called_once_with('fake-port-id', port_req_body) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def test_update_port_bindings_for_instance_same_host(self, get_client_mock): instance = fake_instance.fake_instance_obj(self.context) self.api._has_port_binding_extension = mock.Mock(return_value=True) # We test two ports, one with the same host as the host passed in and # one where binding:host_id isn't set, so we update that port. 
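        # NOTE: concretely, the only update this test expects is a rebind of
        # the host-less port to the instance's host, e.g.:
        #
        #     neutron.update_port(
        #         'fake-port-2',
        #         {'port': {'binding:host_id': instance.host}})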
fake_ports = {'ports': [ {'id': 'fake-port-1', 'binding:host_id': instance.host}, {'id': 'fake-port-2'}]} list_ports_mock = mock.Mock(return_value=fake_ports) get_client_mock.return_value.list_ports = list_ports_mock update_port_mock = mock.Mock() get_client_mock.return_value.update_port = update_port_mock self.api._update_port_binding_for_instance(self.context, instance, instance.host) # Assert that update_port was only called on the port without a host. update_port_mock.assert_called_once_with( 'fake-port-2', {'port': {'binding:host_id': instance.host}}) @mock.patch('nova.network.neutronv2.api.compute_utils') def test_get_preexisting_port_ids(self, mocked_comp_utils): mocked_comp_utils.get_nw_info_for_instance.return_value = [model.VIF( id='1', preserve_on_delete=False), model.VIF( id='2', preserve_on_delete=True), model.VIF( id='3', preserve_on_delete=True)] result = self.api._get_preexisting_port_ids(None) self.assertEqual(['2', '3'], result, "Invalid preexisting ports") def _test_unbind_ports_get_client(self, mock_neutron, mock_has_ext, has_ext=False): mock_ctx = mock.Mock(is_admin=False) mock_has_ext.return_value = has_ext ports = ["1", "2", "3"] self.api._unbind_ports(mock_ctx, ports, mock_neutron) get_client_calls = [] get_client_calls.append(mock.call(mock_ctx) if not has_ext else mock.call(mock_ctx, admin=True)) if has_ext: self.assertEqual(1, mock_neutron.call_count) mock_neutron.assert_has_calls(get_client_calls, True) else: self.assertEqual(0, mock_neutron.call_count) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports_get_client_binding_extension(self, mock_neutron, mock_has_ext): self._test_unbind_ports_get_client(mock_neutron, mock_has_ext, True) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports_get_client(self, mock_neutron, mock_has_ext): self._test_unbind_ports_get_client(mock_neutron, mock_has_ext) def _test_unbind_ports(self, mock_neutron, mock_has_ext, has_ext=False): mock_client = mock.Mock() mock_update_port = mock.Mock() mock_client.update_port = mock_update_port mock_ctx = mock.Mock(is_admin=False) mock_has_ext.return_value = has_ext mock_neutron.return_value = mock_client ports = ["1", "2", "3"] api = neutronapi.API() api._unbind_ports(mock_ctx, ports, mock_client) body = {'port': {'device_id': '', 'device_owner': ''}} if has_ext: body['port']['binding:host_id'] = None body['port']['binding:profile'] = {} update_port_calls = [] for p in ports: update_port_calls.append(mock.call(p, body)) self.assertEqual(3, mock_update_port.call_count) mock_update_port.assert_has_calls(update_port_calls) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports_binding_ext(self, mock_neutron, mock_has_ext): self._test_unbind_ports(mock_neutron, mock_has_ext, True) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_unbind_ports(self, mock_neutron, mock_has_ext): self._test_unbind_ports(mock_neutron, mock_has_ext, False) @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') def test_unbind_ports_no_port_ids(self, mock_has_ext): # Tests that None entries in the ports list are filtered out. 
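        # NOTE: for ports that are not filtered out, the _unbind_ports tests
        # above expect an update body along these lines (a sketch based on
        # the expected calls built in _test_unbind_ports):
        #
        #     body = {'port': {'device_id': '', 'device_owner': ''}}
        #     if has_port_binding_extension:
        #         body['port']['binding:host_id'] = None
        #         body['port']['binding:profile'] = {}
        #     neutron.update_port(port_id, body)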
mock_client = mock.Mock() mock_update_port = mock.Mock() mock_client.update_port = mock_update_port mock_ctx = mock.Mock(is_admin=False) mock_has_ext.return_value = True api = neutronapi.API() api._unbind_ports(mock_ctx, [None], mock_client, mock_client) self.assertFalse(mock_update_port.called) @mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info') @mock.patch('nova.network.neutronv2.api.excutils') @mock.patch('nova.network.neutronv2.api.API._delete_ports') @mock.patch('nova.network.neutronv2.api.API.' '_check_external_network_attach') @mock.patch('nova.network.neutronv2.api.LOG') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.API.' '_populate_neutron_extension_values') @mock.patch('nova.network.neutronv2.api.API._get_available_networks') @mock.patch('nova.network.neutronv2.api.get_client') def test_allocate_for_instance_unbind(self, mock_ntrn, mock_avail_nets, mock_ext_vals, mock_has_pbe, mock_unbind, mock_log, mock_cena, mock_del_ports, mock_exeu, mock_giwn): mock_nc = mock.Mock() def show_port(port_id): return {'port': {'network_id': 'net-1', 'id': port_id, 'tenant_id': 'proj-1'}} mock_nc.show_port = show_port mock_ntrn.return_value = mock_nc mock_nc.update_port.side_effect = [True, True, Exception] mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_has_pbe.return_value = False nw_req = objects.NetworkRequestList( objects = [objects.NetworkRequest(port_id='fake-port1'), objects.NetworkRequest(port_id='fake-port2'), objects.NetworkRequest(port_id='fail-port')]) mock_avail_nets.return_value = [{'id': 'net-1', 'subnets': ['subnet1']}] self.api.allocate_for_instance(mock.sentinel.ctx, mock_inst, requested_networks=nw_req) mock_unbind.assert_called_once_with(mock.sentinel.ctx, ['fake-port1', 'fake-port2'], mock.ANY, mock.ANY) @mock.patch('nova.network.neutronv2.api.API._process_requested_networks') @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.API._get_available_networks') @mock.patch('nova.network.neutronv2.api.get_client') def test_allocate_port_for_instance_no_networks(self, mock_getclient, mock_avail_nets, mock_has_pbe, mock_process_request_net): """Tests that if no networks are requested and no networks are available, we fail with InterfaceAttachFailedNoNetwork. 
""" instance = fake_instance.fake_instance_obj(self.context) mock_has_pbe.return_value = False mock_process_request_net.return_value = ({}, [], [], None) mock_avail_nets.return_value = [] api = neutronapi.API() ex = self.assertRaises(exception.InterfaceAttachFailedNoNetwork, api.allocate_port_for_instance, self.context, instance, port_id=None) self.assertEqual( "No specific network was requested and none are available for " "project 'fake-project'.", six.text_type(ex)) @mock.patch('nova.objects.network_request.utils') @mock.patch('nova.network.neutronv2.api.LOG') @mock.patch('nova.network.neutronv2.api.base_api') @mock.patch('nova.network.neutronv2.api.API._delete_ports') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') @mock.patch('nova.network.neutronv2.api.API._get_preexisting_port_ids') @mock.patch('nova.network.neutronv2.api.get_client') def test_preexisting_deallocate_for_instance(self, mock_ntrn, mock_gppids, mock_unbind, mock_deletep, mock_baseapi, mock_log, req_utils): req_utils.is_neutron.return_value = True mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_nc.list_ports.return_value = {'ports': [ {'id': 'port-1'}, {'id': 'port-2'}, {'id': 'port-3'} ]} nw_req = objects.NetworkRequestList( objects = [objects.NetworkRequest(network_id='net-1', address='192.168.0.3', port_id='port-1', pci_request_id='pci-1')]) mock_gppids.return_value = ['port-3'] self.api.deallocate_for_instance(mock.sentinel.ctx, mock_inst, requested_networks=nw_req) mock_unbind.assert_called_once_with(mock.sentinel.ctx, set(['port-1', 'port-3']), mock.ANY) mock_deletep.assert_called_once_with(mock_nc, mock_inst, set(['port-2']), raise_if_fail=True) @mock.patch('nova.network.neutronv2.api.API.get_instance_nw_info') @mock.patch('nova.network.neutronv2.api.API._unbind_ports') @mock.patch('nova.network.neutronv2.api.compute_utils') @mock.patch('nova.network.neutronv2.api.get_client') def test_preexisting_deallocate_port_for_instance(self, mock_ntrn, mock_comp_utils, mock_unbind, mock_netinfo): mock_comp_utils.get_nw_info_for_instance.return_value = [model.VIF( id='1', preserve_on_delete=False), model.VIF( id='2', preserve_on_delete=True), model.VIF( id='3', preserve_on_delete=True)] mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_client = mock.Mock() mock_ntrn.return_value = mock_client self.api.deallocate_port_for_instance(mock.sentinel.ctx, mock_inst, '2') mock_unbind.assert_called_once_with(mock.sentinel.ctx, ['2'], mock_client) @mock.patch('nova.network.neutronv2.api.API.' '_check_external_network_attach') @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.API.' 
'_populate_neutron_extension_values') @mock.patch('nova.network.neutronv2.api.API._get_available_networks') @mock.patch('nova.network.neutronv2.api.get_client') def test_port_binding_failed_created_port(self, mock_ntrn, mock_avail_nets, mock_ext_vals, mock_has_pbe, mock_cena): mock_has_pbe.return_value = True mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_avail_nets.return_value = [{'id': 'net-1', 'subnets': ['subnet1']}] mock_nc.create_port.return_value = {'port': {'id': 'fake_id', 'tenant_id': mock_inst.project_id, 'binding:vif_type': 'binding_failed'}} self.assertRaises(exception.PortBindingFailed, self.api.allocate_for_instance, mock.sentinel.ctx, mock_inst) mock_nc.delete_port.assert_called_once_with('fake_id') @mock.patch('nova.network.neutronv2.api.API._show_port') @mock.patch('nova.network.neutronv2.api.API._has_port_binding_extension') @mock.patch('nova.network.neutronv2.api.get_client') def test_port_binding_failed_with_request(self, mock_ntrn, mock_has_pbe, mock_show_port): mock_has_pbe.return_value = True mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_inst = mock.Mock(project_id="proj-1", availability_zone='zone-1', uuid='inst-1') mock_show_port.return_value = { 'tenant_id': mock_inst.project_id, 'binding:vif_type': 'binding_failed'} nw_req = objects.NetworkRequestList( objects = [objects.NetworkRequest(port_id='fake_id')]) self.assertRaises(exception.PortBindingFailed, self.api.allocate_for_instance, mock.sentinel.ctx, mock_inst, requested_networks=nw_req) @mock.patch('nova.network.neutronv2.api.get_client') def test_get_floating_ip_by_address_not_found_neutron_not_found(self, mock_ntrn): mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_nc.list_floatingips.side_effect = exceptions.NotFound() address = '172.24.4.227' self.assertRaises(exception.FloatingIpNotFoundForAddress, self.api.get_floating_ip_by_address, self.context, address) @mock.patch('nova.network.neutronv2.api.get_client') def test_get_floating_ip_by_address_not_found_neutron_raises_non404(self, mock_ntrn): mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_nc.list_floatingips.side_effect = exceptions.InternalServerError() address = '172.24.4.227' self.assertRaises(exceptions.InternalServerError, self.api.get_floating_ip_by_address, self.context, address) @mock.patch('nova.network.neutronv2.api.get_client') def test_get_floating_ips_by_project_not_found(self, mock_ntrn): mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_nc.list_floatingips.side_effect = exceptions.NotFound() fips = self.api.get_floating_ips_by_project(self.context) self.assertEqual([], fips) @mock.patch('nova.network.neutronv2.api.get_client') def test_get_floating_ips_by_project_not_found_legacy(self, mock_ntrn): # FIXME(danms): Remove this test along with the code path it tests # when bug 1513879 is fixed. 
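        # NOTE: a sketch of the behaviour the legacy test below implies (not
        # the literal code path): a bare NeutronClientException carrying
        # status_code=404 is treated like NotFound, anything else propagates:
        #
        #     except exceptions.NeutronClientException as e:
        #         if e.status_code == 404:
        #             return []
        #         raise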
mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc # neutronclient doesn't raise NotFound in this scenario, it raises a # NeutronClientException with status_code=404 notfound = exceptions.NeutronClientException(status_code=404) mock_nc.list_floatingips.side_effect = notfound fips = self.api.get_floating_ips_by_project(self.context) self.assertEqual([], fips) @mock.patch('nova.network.neutronv2.api.get_client') def test_get_floating_ips_by_project_raises_non404(self, mock_ntrn): mock_nc = mock.Mock() mock_ntrn.return_value = mock_nc mock_nc.list_floatingips.side_effect = exceptions.InternalServerError() self.assertRaises(exceptions.InternalServerError, self.api.get_floating_ips_by_project, self.context) def test_unbind_ports_reset_dns_name(self): neutron = mock.Mock() port_client = mock.Mock() with mock.patch.object(self.api, '_has_port_binding_extension', return_value=False): self.api.extensions = [constants.DNS_INTEGRATION] ports = [uuids.port_id] self.api._unbind_ports(self.context, ports, neutron, port_client) port_req_body = {'port': {'device_id': '', 'device_owner': '', 'dns_name': ''}} port_client.update_port.assert_called_once_with( uuids.port_id, port_req_body) @mock.patch('nova.network.neutronv2.api.API._get_floating_ip_by_address', return_value={"port_id": "1"}) @mock.patch('nova.network.neutronv2.api.API._show_port', side_effect=exception.PortNotFound(port_id='1')) def test_get_instance_id_by_floating_address_port_not_found(self, mock_show, mock_get): api = neutronapi.API() fip = api.get_instance_id_by_floating_address(self.context, '172.24.4.227') self.assertIsNone(fip) class TestNeutronv2ModuleMethods(test.NoDBTestCase): def test_gather_port_ids_and_networks_wrong_params(self): api = neutronapi.API() # Test with networks not None and port_ids is None self.assertRaises(exception.NovaException, api._gather_port_ids_and_networks, 'fake_context', 'fake_instance', [{'network': {'name': 'foo'}}], None) # Test with networks is None and port_ids not None self.assertRaises(exception.NovaException, api._gather_port_ids_and_networks, 'fake_context', 'fake_instance', None, ['list', 'of', 'port_ids']) def test_ensure_requested_network_ordering_no_preference_ids(self): l = [1, 2, 3] neutronapi._ensure_requested_network_ordering( lambda x: x, l, None) def test_ensure_requested_network_ordering_no_preference_hashes(self): l = [{'id': 3}, {'id': 1}, {'id': 2}] neutronapi._ensure_requested_network_ordering( lambda x: x['id'], l, None) self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}]) def test_ensure_requested_network_ordering_with_preference(self): l = [{'id': 3}, {'id': 1}, {'id': 2}] neutronapi._ensure_requested_network_ordering( lambda x: x['id'], l, [1, 2, 3]) self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}]) class TestNeutronv2Portbinding(TestNeutronv2Base): def test_allocate_for_instance_portbinding(self): self._allocate_for_instance(1, portbinding=True, bind_host_id=self.instance.get('host')) def test_populate_neutron_extension_values_binding(self): api = neutronapi.API() neutronapi.get_client(mox.IgnoreArg()).AndReturn( self.moxed_client) self.moxed_client.list_extensions().AndReturn( {'extensions': [{'name': constants.PORTBINDING_EXT}]}) self.mox.ReplayAll() host_id = 'my_host_id' instance = {'host': host_id} port_req_body = {'port': {}} api._populate_neutron_extension_values(self.context, instance, None, port_req_body, bind_host_id=host_id) self.assertEqual(host_id, port_req_body['port']['binding:host_id']) self.assertFalse(port_req_body['port'].get('binding:profile')) 
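    # NOTE: the SR-IOV variant below checks that the PCI device allocated
    # for the port is turned into a binding:profile; with the fake device
    # used in that test the expected result is:
    #
    #     port_req_body['port']['binding:profile'] = {
    #         'pci_vendor_info': '1377:0047',
    #         'pci_slot': '0000:0a:00.1',
    #         'physical_network': 'phynet1',
    #     }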
@mock.patch.object(pci_whitelist, 'get_pci_device_devspec') @mock.patch.object(pci_manager, 'get_instance_pci_devs') def test_populate_neutron_extension_values_binding_sriov(self, mock_get_instance_pci_devs, mock_get_pci_device_devspec): api = neutronapi.API() host_id = 'my_host_id' instance = {'host': host_id} port_req_body = {'port': {}} pci_req_id = 'my_req_id' pci_dev = {'vendor_id': '1377', 'product_id': '0047', 'address': '0000:0a:00.1', } PciDevice = collections.namedtuple('PciDevice', ['vendor_id', 'product_id', 'address']) mydev = PciDevice(**pci_dev) profile = {'pci_vendor_info': '1377:0047', 'pci_slot': '0000:0a:00.1', 'physical_network': 'phynet1', } mock_get_instance_pci_devs.return_value = [mydev] devspec = mock.Mock() devspec.get_tags.return_value = {'physical_network': 'phynet1'} mock_get_pci_device_devspec.return_value = devspec api._populate_neutron_binding_profile(instance, pci_req_id, port_req_body) self.assertEqual(profile, port_req_body['port']['binding:profile']) def _test_update_port_binding_false(self, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(False) self.mox.ReplayAll() func(*args) def _test_update_port_binding_true(self, expected_bind_host, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(True) neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) search_opts = {'device_id': self.instance['uuid'], 'tenant_id': self.instance['project_id']} ports = {'ports': [{'id': 'test1'}]} self.moxed_client.list_ports(**search_opts).AndReturn(ports) port_req_body = {'port': {'binding:host_id': expected_bind_host}} self.moxed_client.update_port('test1', port_req_body).AndReturn(None) self.mox.ReplayAll() func(*args) def _test_update_port_true_exception(self, expected_bind_host, func_name, *args): api = neutronapi.API() func = getattr(api, func_name) self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg(), refresh_cache=True).AndReturn(True) neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn( self.moxed_client) search_opts = {'device_id': self.instance['uuid'], 'tenant_id': self.instance['project_id']} ports = {'ports': [{'id': 'test1'}]} self.moxed_client.list_ports(**search_opts).AndReturn(ports) port_req_body = {'port': {'binding:host_id': expected_bind_host}} self.moxed_client.update_port('test1', port_req_body).AndRaise( Exception("fail to update port")) self.mox.ReplayAll() self.assertRaises(NEUTRON_CLIENT_EXCEPTION, func, *args) def test_migrate_instance_finish_binding_false(self): self._test_update_port_binding_false('migrate_instance_finish', self.context, None, {'dest_compute': 'fake'}) def test_migrate_instance_finish_binding_true(self): migration = {'source_compute': self.instance.get('host'), 'dest_compute': 'dest_host'} instance = self._fake_instance_object(self.instance) self._test_update_port_binding_true('dest_host', 'migrate_instance_finish', self.context, instance, migration) def test_migrate_instance_finish_binding_true_exception(self): migration = {'source_compute': self.instance.get('host'), 'dest_compute': 'dest_host'} instance = self._fake_instance_object(self.instance) self._test_update_port_true_exception('dest_host', 
'migrate_instance_finish', self.context, instance, migration) def test_setup_instance_network_on_host_false(self): self._test_update_port_binding_false( 'setup_instance_network_on_host', self.context, None, 'fake_host') def test_setup_instance_network_on_host_true(self): instance = self._fake_instance_object(self.instance) self._test_update_port_binding_true('fake_host', 'setup_instance_network_on_host', self.context, instance, 'fake_host') def test_setup_instance_network_on_host_exception(self): instance = self._fake_instance_object(self.instance) self._test_update_port_true_exception( 'fake_host', 'setup_instance_network_on_host', self.context, instance, 'fake_host') def test_associate_not_implemented(self): api = neutronapi.API() self.assertRaises(NotImplementedError, api.associate, self.context, 'id') class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base): def setUp(self): super(TestNeutronv2ExtraDhcpOpts, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self): self._allocate_for_instance(1, extra_dhcp_opts=False) def test_allocate_for_instance_extradhcpopts(self): dhcp_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] self._allocate_for_instance(1, dhcp_options=dhcp_opts) class TestNeutronv2NeutronHostnameDNS(TestNeutronv2Base): def setUp(self): super(TestNeutronv2NeutronHostnameDNS, self).setUp() neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn( self.moxed_client) def test_allocate_for_instance_create_port(self): # The port's dns_name attribute should be set by the port create # request in allocate_for_instance self._allocate_for_instance(1, dns_extension=True) def test_allocate_for_instance_with_requested_port(self): # The port's dns_name attribute should be set by the port update # request in allocate_for_instance requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, dns_extension=True, requested_networks=requested_networks) def test_allocate_for_instance_port_dns_name_preset_equal_hostname(self): # The port's dns_name attribute should be set by the port update # request in allocate_for_instance. 
The port's dns_name was preset by # the user with a value equal to the instance's hostname requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, dns_extension=True, requested_networks=requested_networks, _dns_name='test-instance') def test_allocate_for_instance_port_dns_name_preset_noteq_hostname(self): # If a pre-existing port has dns_name set, an exception should be # raised if dns_name is not equal to the instance's hostname requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) api = self._stub_allocate_for_instance( requested_networks=requested_networks, dns_extension=True, _break='pre_list_networks', _dns_name='my-instance') self.assertRaises(exception.PortNotUsableDNS, api.allocate_for_instance, self.context, self.instance, requested_networks=requested_networks) class TestNeutronv2NeutronHostnameDNSPortbinding(TestNeutronv2Base): def test_allocate_for_instance_create_port(self): # The port's dns_name attribute should be set by the port create # request in allocate_for_instance self._allocate_for_instance(1, portbinding=True, dns_extension=True, bind_host_id=self.instance.get('host')) def test_allocate_for_instance_with_requested_port(self): # The port's dns_name attribute should be set by the port update # request in allocate_for_instance requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=1, dns_extension=True, portbinding=True, bind_host_id=self.instance.get('host'), requested_networks=requested_networks) def test_allocate_for_instance_create_port_with_dns_domain(self): # The port's dns_name attribute should be set by the port update # request in _update_port_dns_name. This should happen only when the # port binding extension is enabled and the port's network has a # non-blank dns_domain attribute self._allocate_for_instance(11, portbinding=True, dns_extension=True, bind_host_id=self.instance.get('host')) def test_allocate_for_instance_with_requested_port_with_dns_domain(self): # The port's dns_name attribute should be set by the port update # request in _update_port_dns_name. This should happen only when the # port binding extension is enabled and the port's network has a # non-blank dns_domain attribute requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id='my_portid1')]) self._allocate_for_instance(net_idx=11, dns_extension=True, portbinding=True, bind_host_id=self.instance.get('host'), requested_networks=requested_networks) class TestNeutronClientForAdminScenarios(test.NoDBTestCase): def setUp(self): super(TestNeutronClientForAdminScenarios, self).setUp() # NOTE(morganfainberg): The real configuration fixture here is used # instead o the already existing fixtures to ensure that the new # config options are automatically deregistered at the end of the # test run. Without the use of this fixture, the config options # from the plugin(s) would persist for all subsequent tests from when # these are run (due to glonal conf object) and not be fully # representative of a "clean" slate at the start of a test. 
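        # NOTE: the admin-client tests below set configuration equivalent to
        # a nova.conf [neutron] section roughly like this (the values are
        # the fakes used by _test_get_client_for_admin):
        #
        #     [neutron]
        #     url = http://anyhost/
        #     auth_type = v2password
        #     auth_url = http://anyhost/auth
        #     timeout = 30
        #     # plus tenant_id and user_id when the use_id flag is passed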
self.config_fixture = self.useFixture(config_fixture.Config()) oslo_opts = ks_loading.get_auth_plugin_conf_options('v2password') self.config_fixture.register_opts(oslo_opts, 'neutron') @requests_mock.mock() def _test_get_client_for_admin(self, req_mock, use_id=False, admin_context=False): token_value = uuid.uuid4().hex auth_url = 'http://anyhost/auth' token_resp = V2Token(token_id=token_value) req_mock.post(auth_url + '/tokens', json=token_resp) self.flags(url='http://anyhost/', group='neutron') self.flags(auth_type='v2password', group='neutron') self.flags(auth_url=auth_url, group='neutron') self.flags(timeout=30, group='neutron') if use_id: self.flags(tenant_id='tenant_id', group='neutron') self.flags(user_id='user_id', group='neutron') if admin_context: my_context = context.get_admin_context() else: my_context = context.RequestContext('userid', 'my_tenantid', auth_token='token') # clean global neutronapi.reset_state() if admin_context: # Note that the context does not contain a token but is # an admin context which will force an elevation to admin # credentials. context_client = neutronapi.get_client(my_context) else: # Note that the context is not elevated, but the True is passed in # which will force an elevation to admin credentials even though # the context has an auth_token. context_client = neutronapi.get_client(my_context, True) admin_auth = neutronapi._ADMIN_AUTH self.assertEqual(CONF.neutron.auth_url, admin_auth.auth_url) self.assertEqual(CONF.neutron.password, admin_auth.password) if use_id: self.assertEqual(CONF.neutron.tenant_id, admin_auth.tenant_id) self.assertEqual(CONF.neutron.user_id, admin_auth.user_id) self.assertIsNone(admin_auth.tenant_name) self.assertIsNone(admin_auth.username) else: self.assertEqual(CONF.neutron.username, admin_auth.username) self.assertIsNone(admin_auth.tenant_id) self.assertIsNone(admin_auth.user_id) self.assertEqual(CONF.neutron.timeout, neutronapi._SESSION.timeout) self.assertEqual( token_value, context_client.httpclient.auth.get_token(neutronapi._SESSION)) self.assertEqual( CONF.neutron.url, context_client.httpclient.get_endpoint()) def test_get_client_for_admin(self): self._test_get_client_for_admin() def test_get_client_for_admin_with_id(self): self._test_get_client_for_admin(use_id=True) def test_get_client_for_admin_context(self): self._test_get_client_for_admin(admin_context=True) def test_get_client_for_admin_context_with_id(self): self._test_get_client_for_admin(use_id=True, admin_context=True) class TestNeutronPortSecurity(test.NoDBTestCase): @mock.patch.object(neutronapi.API, 'get_instance_nw_info') @mock.patch.object(neutronapi.API, '_update_port_dns_name') @mock.patch.object(neutronapi.API, '_create_port') @mock.patch.object(neutronapi.API, '_populate_neutron_extension_values') @mock.patch.object(neutronapi.API, '_check_external_network_attach') @mock.patch.object(neutronapi.API, '_process_security_groups') @mock.patch.object(neutronapi.API, '_get_available_networks') @mock.patch.object(neutronapi.API, '_process_requested_networks') @mock.patch.object(neutronapi.API, '_has_port_binding_extension') @mock.patch.object(neutronapi, 'get_client') def test_no_security_groups_requested( self, mock_get_client, mock_has_port_binding_extension, mock_process_requested_networks, mock_get_available_networks, mock_process_security_groups, mock_check_external_network_attach, mock_populate_neutron_extension_values, mock_create_port, mock_update_port_dns_name, mock_get_instance_nw_info): nets = [ {'id': 'net1', 'name': 'net_name1', 'subnets': 
['mysubnid1'], 'port_security_enabled': True}, {'id': 'net2', 'name': 'net_name2', 'subnets': ['mysubnid2'], 'port_security_enabled': True}] onets = objects.NetworkRequestList(objects=[ objects.NetworkRequest(network_id='net1'), objects.NetworkRequest(network_id='net2')]) instance = objects.Instance( project_id=1, availability_zone='nova', uuid='uuid1') secgroups = ['default'] # Nova API provides the 'default' mock_process_requested_networks.return_value = [ None, ['net1', 'net2'], onets, None] mock_get_available_networks.return_value = nets mock_process_security_groups.return_value = [] api = neutronapi.API() api.allocate_for_instance( 'context', instance, requested_networks=onets, security_groups=secgroups) mock_process_security_groups.assert_called_once_with( instance, mock.ANY, []) mock_create_port.assert_has_calls([ mock.call( mock.ANY, instance, u'net1', {'port': {'device_owner': u'compute:nova', 'device_id': 'uuid1'}}, None, [], None, None), mock.call( mock.ANY, instance, u'net2', {'port': {'device_owner': u'compute:nova', 'device_id': 'uuid1'}}, None, [], None, None)]) @mock.patch.object(neutronapi.API, 'get_instance_nw_info') @mock.patch.object(neutronapi.API, '_update_port_dns_name') @mock.patch.object(neutronapi.API, '_create_port') @mock.patch.object(neutronapi.API, '_populate_neutron_extension_values') @mock.patch.object(neutronapi.API, '_check_external_network_attach') @mock.patch.object(neutronapi.API, '_process_security_groups') @mock.patch.object(neutronapi.API, '_get_available_networks') @mock.patch.object(neutronapi.API, '_process_requested_networks') @mock.patch.object(neutronapi.API, '_has_port_binding_extension') @mock.patch.object(neutronapi, 'get_client') def test_security_groups_requested( self, mock_get_client, mock_has_port_binding_extension, mock_process_requested_networks, mock_get_available_networks, mock_process_security_groups, mock_check_external_network_attach, mock_populate_neutron_extension_values, mock_create_port, mock_update_port_dns_name, mock_get_instance_nw_info): nets = [ {'id': 'net1', 'name': 'net_name1', 'subnets': ['mysubnid1'], 'port_security_enabled': True}, {'id': 'net2', 'name': 'net_name2', 'subnets': ['mysubnid2'], 'port_security_enabled': True}] onets = objects.NetworkRequestList(objects=[ objects.NetworkRequest(network_id='net1'), objects.NetworkRequest(network_id='net2')]) instance = objects.Instance( project_id=1, availability_zone='nova', uuid='uuid1') secgroups = ['default', 'secgrp1', 'secgrp2'] mock_process_requested_networks.return_value = [ None, ['net1', 'net2'], onets, None] mock_get_available_networks.return_value = nets mock_process_security_groups.return_value = ['default-uuid', 'secgrp-uuid1', 'secgrp-uuid2'] api = neutronapi.API() api.allocate_for_instance( 'context', instance, requested_networks=onets, security_groups=secgroups) mock_process_security_groups.assert_called_once_with( instance, mock.ANY, ['default', 'secgrp1', 'secgrp2']) mock_create_port.assert_has_calls([ mock.call( mock.ANY, instance, u'net1', {'port': {'device_owner': u'compute:nova', 'device_id': 'uuid1'}}, None, ['default-uuid', 'secgrp-uuid1', 'secgrp-uuid2'], None, None), mock.call( mock.ANY, instance, u'net2', {'port': {'device_owner': u'compute:nova', 'device_id': 'uuid1'}}, None, ['default-uuid', 'secgrp-uuid1', 'secgrp-uuid2'], None, None)]) @mock.patch.object(neutronapi.API, 'get_instance_nw_info') @mock.patch.object(neutronapi.API, '_update_port_dns_name') @mock.patch.object(neutronapi.API, '_create_port') @mock.patch.object(neutronapi.API, 
'_populate_neutron_extension_values') @mock.patch.object(neutronapi.API, '_check_external_network_attach') @mock.patch.object(neutronapi.API, '_process_security_groups') @mock.patch.object(neutronapi.API, '_get_available_networks') @mock.patch.object(neutronapi.API, '_process_requested_networks') @mock.patch.object(neutronapi.API, '_has_port_binding_extension') @mock.patch.object(neutronapi, 'get_client') def test_port_security_disabled_no_security_groups_requested( self, mock_get_client, mock_has_port_binding_extension, mock_process_requested_networks, mock_get_available_networks, mock_process_security_groups, mock_check_external_network_attach, mock_populate_neutron_extension_values, mock_create_port, mock_update_port_dns_name, mock_get_instance_nw_info): nets = [ {'id': 'net1', 'name': 'net_name1', 'subnets': ['mysubnid1'], 'port_security_enabled': False}, {'id': 'net2', 'name': 'net_name2', 'subnets': ['mysubnid2'], 'port_security_enabled': False}] onets = objects.NetworkRequestList(objects=[ objects.NetworkRequest(network_id='net1'), objects.NetworkRequest(network_id='net2')]) instance = objects.Instance( project_id=1, availability_zone='nova', uuid='uuid1') secgroups = ['default'] # Nova API provides the 'default' mock_process_requested_networks.return_value = [ None, ['net1', 'net2'], onets, None] mock_get_available_networks.return_value = nets mock_process_security_groups.return_value = [] api = neutronapi.API() api.allocate_for_instance( 'context', instance, requested_networks=onets, security_groups=secgroups) mock_process_security_groups.assert_called_once_with( instance, mock.ANY, []) mock_create_port.assert_has_calls([ mock.call( mock.ANY, instance, u'net1', {'port': {'device_owner': u'compute:nova', 'device_id': 'uuid1'}}, None, [], None, None), mock.call( mock.ANY, instance, u'net2', {'port': {'device_owner': u'compute:nova', 'device_id': 'uuid1'}}, None, [], None, None)]) @mock.patch.object(neutronapi.API, 'get_instance_nw_info') @mock.patch.object(neutronapi.API, '_update_port_dns_name') @mock.patch.object(neutronapi.API, '_create_port') @mock.patch.object(neutronapi.API, '_populate_neutron_extension_values') @mock.patch.object(neutronapi.API, '_check_external_network_attach') @mock.patch.object(neutronapi.API, '_process_security_groups') @mock.patch.object(neutronapi.API, '_get_available_networks') @mock.patch.object(neutronapi.API, '_process_requested_networks') @mock.patch.object(neutronapi.API, '_has_port_binding_extension') @mock.patch.object(neutronapi, 'get_client') def test_port_security_disabled_and_security_groups_requested( self, mock_get_client, mock_has_port_binding_extension, mock_process_requested_networks, mock_get_available_networks, mock_process_security_groups, mock_check_external_network_attach, mock_populate_neutron_extension_values, mock_create_port, mock_update_port_dns_name, mock_get_instance_nw_info): nets = [ {'id': 'net1', 'name': 'net_name1', 'subnets': ['mysubnid1'], 'port_security_enabled': True}, {'id': 'net2', 'name': 'net_name2', 'subnets': ['mysubnid2'], 'port_security_enabled': False}] onets = objects.NetworkRequestList(objects=[ objects.NetworkRequest(network_id='net1'), objects.NetworkRequest(network_id='net2')]) instance = objects.Instance( project_id=1, availability_zone='nova', uuid='uuid1') secgroups = ['default', 'secgrp1', 'secgrp2'] mock_process_requested_networks.return_value = [ None, ['net1', 'net2'], onets, None] mock_get_available_networks.return_value = nets mock_process_security_groups.return_value = ['default-uuid', 
'secgrp-uuid1', 'secgrp-uuid2'] api = neutronapi.API() self.assertRaises( exception.SecurityGroupCannotBeApplied, api.allocate_for_instance, 'context', instance, requested_networks=onets, security_groups=secgroups) mock_process_security_groups.assert_called_once_with( instance, mock.ANY, ['default', 'secgrp1', 'secgrp2']) nova-13.1.4/nova/tests/unit/network/test_api.py0000664000567000056710000007063313064447152022662 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for network API.""" import itertools import uuid import mock from mox3 import mox from oslo_policy import policy as oslo_policy from nova.compute import flavors from nova import context from nova import exception from nova import network from nova.network import api from nova.network import base_api from nova.network import floating_ips from nova.network import model as network_model from nova.network import rpcapi as network_rpcapi from nova import objects from nova.objects import fields from nova import policy from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_fixed_ip from nova.tests.unit.objects import test_virtual_interface FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16' fake_info_cache = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'instance_uuid': 'fake-uuid', 'network_info': '[]', } class NetworkPolicyTestCase(test.TestCase): def setUp(self): super(NetworkPolicyTestCase, self).setUp() policy.reset() policy.init() self.context = context.get_admin_context() def tearDown(self): super(NetworkPolicyTestCase, self).tearDown() policy.reset() def test_check_policy(self): self.mox.StubOutWithMock(policy, 'enforce') target = { 'project_id': self.context.project_id, 'user_id': self.context.user_id, } policy.enforce(self.context, 'network:get_all', target) self.mox.ReplayAll() api.check_policy(self.context, 'get_all') def test_skip_policy(self): policy.reset() rules = {'network:get_all': '!'} policy.set_rules(oslo_policy.Rules.from_dict(rules)) api = network.API() self.assertRaises(exception.PolicyNotAuthorized, api.get_all, self.context) api = network.API(skip_policy_check=True) api.get_all(self.context) class ApiTestCase(test.TestCase): def setUp(self): super(ApiTestCase, self).setUp() self.network_api = network.API() self.context = context.RequestContext('fake-user', 'fake-project') @mock.patch('nova.objects.NetworkList.get_all') def test_get_all(self, mock_get_all): mock_get_all.return_value = mock.sentinel.get_all self.assertEqual(mock.sentinel.get_all, self.network_api.get_all(self.context)) mock_get_all.assert_called_once_with(self.context, project_only=True) @mock.patch('nova.objects.NetworkList.get_all') def test_get_all_liberal(self, mock_get_all): self.flags(network_manager='nova.network.manager.FlatDHCPManaager') mock_get_all.return_value = mock.sentinel.get_all self.assertEqual(mock.sentinel.get_all, self.network_api.get_all(self.context)) 
mock_get_all.assert_called_once_with(self.context, project_only="allow_none") @mock.patch('nova.objects.NetworkList.get_all') def test_get_all_no_networks(self, mock_get_all): mock_get_all.side_effect = exception.NoNetworksFound self.assertEqual([], self.network_api.get_all(self.context)) mock_get_all.assert_called_once_with(self.context, project_only=True) @mock.patch('nova.objects.Network.get_by_uuid') def test_get(self, mock_get): mock_get.return_value = mock.sentinel.get_by_uuid self.assertEqual(mock.sentinel.get_by_uuid, self.network_api.get(self.context, 'fake-uuid')) @mock.patch('nova.objects.Network.get_by_id') @mock.patch('nova.db.virtual_interface_get_by_instance') def test_get_vifs_by_instance(self, mock_get_by_instance, mock_get_by_id): mock_get_by_instance.return_value = [ dict(test_virtual_interface.fake_vif, network_id=123)] mock_get_by_id.return_value = objects.Network() mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid instance = objects.Instance(uuid=mock.sentinel.inst_uuid) vifs = self.network_api.get_vifs_by_instance(self.context, instance) self.assertEqual(1, len(vifs)) self.assertEqual(123, vifs[0].network_id) self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid) mock_get_by_instance.assert_called_once_with( self.context, str(mock.sentinel.inst_uuid)) mock_get_by_id.assert_called_once_with(self.context, 123, project_only='allow_none') @mock.patch('nova.objects.Network.get_by_id') @mock.patch('nova.db.virtual_interface_get_by_address') def test_get_vif_by_mac_address(self, mock_get_by_address, mock_get_by_id): mock_get_by_address.return_value = dict( test_virtual_interface.fake_vif, network_id=123) mock_get_by_id.return_value = objects.Network( uuid=mock.sentinel.network_uuid) vif = self.network_api.get_vif_by_mac_address(self.context, mock.sentinel.mac) self.assertEqual(123, vif.network_id) self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid) mock_get_by_address.assert_called_once_with(self.context, mock.sentinel.mac) mock_get_by_id.assert_called_once_with(self.context, 123, project_only='allow_none') def test_allocate_for_instance_handles_macs_passed(self): # If a macs argument is supplied to the 'nova-network' API, it is just # ignored. This test checks that the call down to the rpcapi layer # doesn't pass macs down: nova-network doesn't support hypervisor # mac address limits (today anyhow). 
macs = set(['ab:cd:ef:01:23:34']) self.mox.StubOutWithMock( self.network_api.network_rpcapi, "allocate_for_instance") kwargs = dict(zip(['host', 'instance_id', 'project_id', 'requested_networks', 'rxtx_factor', 'vpn', 'macs', 'dhcp_options'], itertools.repeat(mox.IgnoreArg()))) self.network_api.network_rpcapi.allocate_for_instance( mox.IgnoreArg(), **kwargs).AndReturn([]) self.mox.ReplayAll() flavor = flavors.get_default_flavor() flavor['rxtx_factor'] = 0 instance = objects.Instance(id=1, uuid='uuid', project_id='project_id', host='host', system_metadata={}, flavor=flavor) self.network_api.allocate_for_instance( self.context, instance, 'vpn', 'requested_networks', macs=macs) def _do_test_associate_floating_ip(self, orig_instance_uuid): """Test post-association logic.""" new_instance = objects.Instance(uuid=FAKE_UUID) def fake_associate(*args, **kwargs): return orig_instance_uuid self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip', fake_associate) def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join=None, use_slave=None): if instance_uuid == orig_instance_uuid: self.assertIn('extra.flavor', columns_to_join) return fake_instance.fake_db_instance(uuid=instance_uuid) self.stubs.Set(self.network_api.db, 'instance_get_by_uuid', fake_instance_get_by_uuid) def fake_get_nw_info(ctxt, instance): class FakeNWInfo(object): def json(self): pass return FakeNWInfo() self.stubs.Set(self.network_api, '_get_instance_nw_info', fake_get_nw_info) if orig_instance_uuid: expected_updated_instances = [new_instance.uuid, orig_instance_uuid] else: expected_updated_instances = [new_instance.uuid] def fake_instance_info_cache_update(context, instance_uuid, cache): self.assertEqual(instance_uuid, expected_updated_instances.pop()) return fake_info_cache self.stubs.Set(self.network_api.db, 'instance_info_cache_update', fake_instance_info_cache_update) def fake_update_instance_cache_with_nw_info(api, context, instance, nw_info=None, update_cells=True): return self.stubs.Set(base_api, "update_instance_cache_with_nw_info", fake_update_instance_cache_with_nw_info) self.network_api.associate_floating_ip(self.context, new_instance, '172.24.4.225', '10.0.0.2') def test_associate_preassociated_floating_ip(self): self._do_test_associate_floating_ip('orig-uuid') def test_associate_unassociated_floating_ip(self): self._do_test_associate_floating_ip(None) def test_get_floating_ip_invalid_id(self): self.assertRaises(exception.InvalidID, self.network_api.get_floating_ip, self.context, '123zzz') @mock.patch('nova.objects.FloatingIP.get_by_id') def test_get_floating_ip(self, mock_get): floating = mock.sentinel.floating mock_get.return_value = floating self.assertEqual(floating, self.network_api.get_floating_ip(self.context, 123)) mock_get.assert_called_once_with(self.context, 123) @mock.patch('nova.objects.FloatingIP.get_pool_names') def test_get_floating_ip_pools(self, mock_get): pools = ['foo', 'bar'] mock_get.return_value = pools self.assertEqual(pools, self.network_api.get_floating_ip_pools( self.context)) @mock.patch('nova.objects.FloatingIP.get_by_address') def test_get_floating_ip_by_address(self, mock_get): floating = mock.sentinel.floating mock_get.return_value = floating self.assertEqual(floating, self.network_api.get_floating_ip_by_address( self.context, mock.sentinel.address)) mock_get.assert_called_once_with(self.context, mock.sentinel.address) @mock.patch('nova.objects.FloatingIPList.get_by_project') def test_get_floating_ips_by_project(self, mock_get): floatings = mock.sentinel.floating_ips 
mock_get.return_value = floatings self.assertEqual(floatings, self.network_api.get_floating_ips_by_project( self.context)) mock_get.assert_called_once_with(self.context, self.context.project_id) def _stub_migrate_instance_calls(self, method, multi_host, info): fake_flavor = flavors.get_default_flavor() fake_flavor['rxtx_factor'] = 1.21 fake_instance = objects.Instance( uuid=uuid.uuid4().hex, project_id='fake_project_id', instance_type_id=fake_flavor['id'], flavor=fake_flavor, system_metadata={}) fake_migration = {'source_compute': 'fake_compute_source', 'dest_compute': 'fake_compute_dest'} def fake_mig_inst_method(*args, **kwargs): info['kwargs'] = kwargs def fake_get_multi_addresses(*args, **kwargs): return multi_host, ['fake_float1', 'fake_float2'] self.stubs.Set(network_rpcapi.NetworkAPI, method, fake_mig_inst_method) self.stubs.Set(self.network_api, '_get_multi_addresses', fake_get_multi_addresses) expected = {'instance_uuid': fake_instance.uuid, 'source_compute': 'fake_compute_source', 'dest_compute': 'fake_compute_dest', 'rxtx_factor': 1.21, 'project_id': 'fake_project_id', 'floating_addresses': None} if multi_host: expected['floating_addresses'] = ['fake_float1', 'fake_float2'] return fake_instance, fake_migration, expected def test_migrate_instance_start_with_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_start', True, info) expected['host'] = 'fake_compute_source' self.network_api.migrate_instance_start(self.context, arg1, arg2) self.assertEqual(info['kwargs'], expected) def test_migrate_instance_start_without_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_start', False, info) self.network_api.migrate_instance_start(self.context, arg1, arg2) self.assertEqual(info['kwargs'], expected) def test_migrate_instance_finish_with_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_finish', True, info) expected['host'] = 'fake_compute_dest' self.network_api.migrate_instance_finish(self.context, arg1, arg2) self.assertEqual(info['kwargs'], expected) def test_migrate_instance_finish_without_multhost(self): info = {'kwargs': {}} arg1, arg2, expected = self._stub_migrate_instance_calls( 'migrate_instance_finish', False, info) self.network_api.migrate_instance_finish(self.context, arg1, arg2) self.assertEqual(info['kwargs'], expected) def test_is_multi_host_instance_has_no_fixed_ip(self): def fake_fixed_ip_get_by_instance(ctxt, uuid): raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid) self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance', fake_fixed_ip_get_by_instance) instance = objects.Instance(uuid=FAKE_UUID) result, floats = self.network_api._get_multi_addresses(self.context, instance) self.assertFalse(result) @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid') def _test_is_multi_host_network_has_no_project_id(self, is_multi_host, fip_get): network = objects.Network( id=123, project_id=None, multi_host=is_multi_host) fip_get.return_value = [ objects.FixedIP(instance_uuid=FAKE_UUID, network=network, floating_ips=objects.FloatingIPList())] instance = objects.Instance(uuid=FAKE_UUID) result, floats = self.network_api._get_multi_addresses(self.context, instance) self.assertEqual(is_multi_host, result) def test_is_multi_host_network_has_no_project_id_multi(self): self._test_is_multi_host_network_has_no_project_id(True) def 
test_is_multi_host_network_has_no_project_id_non_multi(self): self._test_is_multi_host_network_has_no_project_id(False) @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid') def _test_is_multi_host_network_has_project_id(self, is_multi_host, fip_get): network = objects.Network( id=123, project_id=self.context.project_id, multi_host=is_multi_host) fip_get.return_value = [ objects.FixedIP(instance_uuid=FAKE_UUID, network=network, floating_ips=objects.FloatingIPList())] instance = objects.Instance(uuid=FAKE_UUID) result, floats = self.network_api._get_multi_addresses(self.context, instance) self.assertEqual(is_multi_host, result) def test_is_multi_host_network_has_project_id_multi(self): self._test_is_multi_host_network_has_project_id(True) def test_is_multi_host_network_has_project_id_non_multi(self): self._test_is_multi_host_network_has_project_id(False) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.disassociate') def test_network_disassociate_project(self, mock_disassociate, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj self.network_api.associate(self.context, FAKE_UUID, project=None) mock_disassociate.assert_called_once_with(self.context, net_obj.id, host=False, project=True) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.disassociate') def test_network_disassociate_host(self, mock_disassociate, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj self.network_api.associate(self.context, FAKE_UUID, host=None) mock_disassociate.assert_called_once_with(self.context, net_obj.id, host=True, project=False) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.associate') def test_network_associate_project(self, mock_associate, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj project = mock.sentinel.project self.network_api.associate(self.context, FAKE_UUID, project=project) mock_associate.assert_called_once_with(self.context, project, network_id=net_obj.id, force=True) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.save') def test_network_associate_host(self, mock_save, mock_get): net_obj = objects.Network(context=self.context, id=1) mock_get.return_value = net_obj host = str(mock.sentinel.host) self.network_api.associate(self.context, FAKE_UUID, host=host) mock_save.assert_called_once_with() self.assertEqual(host, net_obj.host) @mock.patch('nova.objects.Network.get_by_uuid') @mock.patch('nova.objects.Network.disassociate') def test_network_disassociate(self, mock_disassociate, mock_get): mock_get.return_value = objects.Network(context=self.context, id=123) self.network_api.disassociate(self.context, FAKE_UUID) mock_disassociate.assert_called_once_with(self.context, 123, project=True, host=True) def _test_refresh_cache(self, method, *args, **kwargs): # This test verifies that no call to get_instance_nw_info() is made # from the @refresh_cache decorator for the tested method. 
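        # Editor's note -- an illustrative sketch only, not a quote of nova's
        # base_api.refresh_cache implementation: the decorator exercised by
        # this helper is assumed to behave roughly like
        #
        #     def refresh_cache(func):
        #         @functools.wraps(func)
        #         def wrapper(self, context, *args, **kwargs):
        #             res = func(self, context, *args, **kwargs)
        #             # reuse the method's own NetworkInfo result; only fall
        #             # back to fetching fresh nw_info when nothing came back
        #             update_instance_cache_with_nw_info(
        #                 self, context, _extract_instance(args, kwargs),
        #                 nw_info=res)
        #             return res
        #         return wrapper
        #
        # (_extract_instance is a hypothetical helper named only for this
        # sketch.)  Under that assumption, the assertions below check that the
        # rpcapi's get_instance_nw_info mock is never called when the decorated
        # method already returned a NetworkInfo object.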
with test.nested( mock.patch.object(self.network_api.network_rpcapi, method), mock.patch.object(self.network_api.network_rpcapi, 'get_instance_nw_info'), mock.patch.object(network_model.NetworkInfo, 'hydrate'), mock.patch.object(objects.InstanceInfoCache, 'save'), ) as ( method_mock, nwinfo_mock, hydrate_mock, save_mock ): nw_info = network_model.NetworkInfo([]) method_mock.return_value = nw_info hydrate_mock.return_value = nw_info getattr(self.network_api, method)(*args, **kwargs) hydrate_mock.assert_called_once_with(nw_info) self.assertFalse(nwinfo_mock.called) def test_allocate_for_instance_refresh_cache(self): instance = fake_instance.fake_instance_obj(self.context) vpn = 'fake-vpn' requested_networks = 'fake-networks' self._test_refresh_cache('allocate_for_instance', self.context, instance, vpn, requested_networks) def test_add_fixed_ip_to_instance_refresh_cache(self): instance = fake_instance.fake_instance_obj(self.context) network_id = 'fake-network-id' self._test_refresh_cache('add_fixed_ip_to_instance', self.context, instance, network_id) def test_remove_fixed_ip_from_instance_refresh_cache(self): instance = fake_instance.fake_instance_obj(self.context) address = 'fake-address' self._test_refresh_cache('remove_fixed_ip_from_instance', self.context, instance, address) @mock.patch('nova.db.fixed_ip_get_by_address') def test_get_fixed_ip_by_address(self, fip_get): fip_get.return_value = test_fixed_ip.fake_fixed_ip fip = self.network_api.get_fixed_ip_by_address(self.context, 'fake-addr') self.assertIsInstance(fip, objects.FixedIP) @mock.patch('nova.objects.FixedIP.get_by_id') def test_get_fixed_ip(self, mock_get_by_id): mock_get_by_id.return_value = mock.sentinel.fixed_ip self.assertEqual(mock.sentinel.fixed_ip, self.network_api.get_fixed_ip(self.context, mock.sentinel.id)) mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id) @mock.patch('nova.objects.FixedIP.get_by_floating_address') def test_get_instance_by_floating_address(self, mock_get_by_floating): mock_get_by_floating.return_value = objects.FixedIP( instance_uuid = mock.sentinel.instance_uuid) self.assertEqual(str(mock.sentinel.instance_uuid), self.network_api.get_instance_id_by_floating_address( self.context, mock.sentinel.floating)) mock_get_by_floating.assert_called_once_with(self.context, mock.sentinel.floating) @mock.patch('nova.objects.FixedIP.get_by_floating_address') def test_get_instance_by_floating_address_none(self, mock_get_by_floating): mock_get_by_floating.return_value = None self.assertIsNone( self.network_api.get_instance_id_by_floating_address( self.context, mock.sentinel.floating)) mock_get_by_floating.assert_called_once_with(self.context, mock.sentinel.floating) @mock.patch('nova.network.api.API.migrate_instance_start') def test_cleanup_instance_network_on_host(self, fake_migrate_start): instance = fake_instance.fake_instance_obj(self.context) self.network_api.cleanup_instance_network_on_host( self.context, instance, 'fake_compute_source') fake_migrate_start.assert_called_once_with( self.context, instance, {'source_compute': 'fake_compute_source', 'dest_compute': None}) @mock.patch('nova.network.api.API.migrate_instance_finish') def test_setup_instance_network_on_host(self, fake_migrate_finish): instance = fake_instance.fake_instance_obj(self.context) self.network_api.setup_instance_network_on_host( self.context, instance, 'fake_compute_source') fake_migrate_finish.assert_called_once_with( self.context, instance, {'source_compute': None, 'dest_compute': 'fake_compute_source'}) 
@mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(api.API, '_get_instance_nw_info') @mock.patch('nova.network.base_api.update_instance_cache_with_nw_info') def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock): fake_result = mock.sentinel.get_nw_info_result mock_get.return_value = fake_result instance = fake_instance.fake_instance_obj(self.context) result = self.network_api.get_instance_nw_info(self.context, instance) mock_get.assert_called_once_with(self.context, instance) mock_update.assert_called_once_with(self.network_api, self.context, instance, nw_info=fake_result, update_cells=False) self.assertEqual(fake_result, result) @mock.patch('nova.network.api.API') @mock.patch('nova.db.instance_info_cache_update', return_value=fake_info_cache) class TestUpdateInstanceCache(test.NoDBTestCase): def setUp(self): super(TestUpdateInstanceCache, self).setUp() self.context = context.get_admin_context() self.instance = objects.Instance(uuid=FAKE_UUID) vifs = [network_model.VIF(id='super_vif')] self.nw_info = network_model.NetworkInfo(vifs) self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info', self.nw_info) def test_update_nw_info_none(self, db_mock, api_mock): api_mock._get_instance_nw_info.return_value = self.nw_info base_api.update_instance_cache_with_nw_info(api_mock, self.context, self.instance, None) api_mock._get_instance_nw_info.assert_called_once_with(self.context, self.instance) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': self.nw_json}) def test_update_nw_info_one_network(self, db_mock, api_mock): api_mock._get_instance_nw_info.return_value = self.nw_info base_api.update_instance_cache_with_nw_info(api_mock, self.context, self.instance, self.nw_info) self.assertFalse(api_mock._get_instance_nw_info.called) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': self.nw_json}) def test_update_nw_info_empty_list(self, db_mock, api_mock): api_mock._get_instance_nw_info.return_value = self.nw_info base_api.update_instance_cache_with_nw_info(api_mock, self.context, self.instance, network_model.NetworkInfo([])) self.assertFalse(api_mock._get_instance_nw_info.called) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': '[]'}) def test_decorator_return_object(self, db_mock, api_mock): @base_api.refresh_cache def func(self, context, instance): return network_model.NetworkInfo([]) func(api_mock, self.context, self.instance) self.assertFalse(api_mock._get_instance_nw_info.called) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': '[]'}) def test_decorator_return_none(self, db_mock, api_mock): @base_api.refresh_cache def func(self, context, instance): pass api_mock._get_instance_nw_info.return_value = self.nw_info func(api_mock, self.context, self.instance) api_mock._get_instance_nw_info.assert_called_once_with(self.context, self.instance) db_mock.assert_called_once_with(self.context, self.instance.uuid, {'network_info': self.nw_json}) class NetworkHooksTestCase(test.BaseHookTestCase): def test_instance_network_info_hook(self): info_func = base_api.update_instance_cache_with_nw_info self.assert_has_hook('instance_network_info', info_func) nova-13.1.4/nova/tests/unit/network/test_manager.py0000664000567000056710000050622313064447152023522 0ustar jenkinsjenkins00000000000000# Copyright 2011 Rackspace # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2013 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from mox3 import mox import netaddr from oslo_concurrency import processutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import importutils from oslo_utils import netutils import six import testtools from nova import context from nova import db from nova.db.sqlalchemy import models from nova import exception from nova import ipv6 from nova.network import floating_ips from nova.network import linux_net from nova.network import manager as network_manager from nova.network import model as net_model from nova import objects from nova.objects import network as network_obj from nova.objects import virtual_interface as vif_obj from nova import quota from nova import test from nova.tests.unit import fake_instance from nova.tests.unit import fake_ldap from nova.tests.unit import fake_network from nova.tests.unit import matchers from nova.tests.unit.objects import test_fixed_ip from nova.tests.unit.objects import test_floating_ip from nova.tests.unit.objects import test_network from nova.tests.unit.objects import test_service from nova.tests.unit import utils as test_utils from nova.tests import uuidsentinel as uuids from nova import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) HOST = "testhost" FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" fake_inst = fake_instance.fake_db_instance networks = [{'id': 0, 'uuid': FAKEUUID, 'label': 'test0', 'injected': False, 'multi_host': False, 'cidr': '192.168.0.0/24', 'cidr_v6': '2001:db8::/64', 'gateway_v6': '2001:db8::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'dhcp_server': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.0.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'}, {'id': 1, 'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'label': 'test1', 'injected': False, 'multi_host': False, 'cidr': '192.168.1.0/24', 'cidr_v6': '2001:db9::/64', 'gateway_v6': '2001:db9::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.1.1', 'dhcp_server': '192.168.1.1', 'broadcast': '192.168.1.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.1.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'}] fixed_ips = [{'id': 0, 'network_id': 0, 'address': '192.168.0.100', 'instance_uuid': 0, 'allocated': False, 'virtual_interface_id': 0, 'floating_ips': []}, {'id': 0, 'network_id': 1, 'address': '192.168.1.100', 'instance_uuid': 0, 'allocated': False, 'virtual_interface_id': 0, 'floating_ips': []}, {'id': 0, 'network_id': 1, 'address': '2001:db9:0:1::10', 'instance_uuid': 0, 'allocated': False, 
'virtual_interface_id': 0, 'floating_ips': []}] flavor = {'id': 0, 'rxtx_cap': 3} floating_ip_fields = {'id': 0, 'address': '192.168.10.100', 'pool': 'nova', 'interface': 'eth0', 'fixed_ip_id': 0, 'project_id': None, 'auto_assigned': False} vifs = [{'id': 0, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:00', 'uuid': '00000000-0000-0000-0000-0000000000000000', 'network_id': 0, 'instance_uuid': 0}, {'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:01', 'uuid': '00000000-0000-0000-0000-0000000000000001', 'network_id': 1, 'instance_uuid': 0}, {'id': 2, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:02', 'uuid': '00000000-0000-0000-0000-0000000000000002', 'network_id': 2, 'instance_uuid': 0}] class FlatNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(FlatNetworkTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = network_manager.FlatManager(host=HOST) self.network.instance_dns_domain = '' self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) @testtools.skipIf(test_utils.is_osx(), 'IPv6 pretty-printing broken on OSX, see bug 1409135') def test_get_instance_nw_info_fake(self): fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info nw_info = fake_get_instance_nw_info(self, 0, 2) self.assertFalse(nw_info) nw_info = fake_get_instance_nw_info(self, 1, 2) for i, vif in enumerate(nw_info): nid = i + 1 check = {'bridge': 'fake_br%d' % nid, 'cidr': '192.168.%s.0/24' % nid, 'cidr_v6': '2001:db8:0:%x::/64' % nid, 'id': getattr(uuids, 'vif%i' % nid), 'multi_host': False, 'injected': False, 'bridge_interface': None, 'vlan': None, 'broadcast': '192.168.%d.255' % nid, 'dhcp_server': '192.168.1.1', 'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid], 'gateway': '192.168.%d.1' % nid, 'gateway_v6': '2001:db8:0:1::1', 'label': 'test%d' % nid, 'mac': 'DE:AD:BE:EF:00:%02x' % nid, 'rxtx_cap': 30, 'vif_type': net_model.VIF_TYPE_BRIDGE, 'vif_devname': None, 'vif_uuid': getattr(uuids, 'vif%i' % nid), 'ovs_interfaceid': None, 'qbh_params': None, 'qbg_params': None, 'should_create_vlan': False, 'should_create_bridge': False, 'ip': '192.168.%d.%03d' % (nid, nid + 99), 'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid, 'netmask': '255.255.255.0', 'netmask_v6': 64, 'physical_network': None, } network = vif['network'] net_v4 = vif['network']['subnets'][0] net_v6 = vif['network']['subnets'][1] vif_dict = dict(bridge=network['bridge'], cidr=net_v4['cidr'], cidr_v6=net_v6['cidr'], id=vif['id'], multi_host=network.get_meta('multi_host', False), injected=network.get_meta('injected', False), bridge_interface= network.get_meta('bridge_interface'), vlan=network.get_meta('vlan'), broadcast=str(net_v4.as_netaddr().broadcast), dhcp_server=network.get_meta('dhcp_server', net_v4['gateway']['address']), dns=[ip['address'] for ip in net_v4['dns']], gateway=net_v4['gateway']['address'], gateway_v6=net_v6['gateway']['address'], label=network['label'], mac=vif['address'], rxtx_cap=vif.get_meta('rxtx_cap'), vif_type=vif['type'], vif_devname=vif.get('devname'), vif_uuid=vif['id'], ovs_interfaceid=vif.get('ovs_interfaceid'), qbh_params=vif.get('qbh_params'), qbg_params=vif.get('qbg_params'), should_create_vlan= network.get_meta('should_create_vlan', False), should_create_bridge= network.get_meta('should_create_bridge', 
False), ip=net_v4['ips'][i]['address'], ip_v6=net_v6['ips'][i]['address'], netmask=str(net_v4.as_netaddr().netmask), netmask_v6=net_v6.as_netaddr()._prefixlen, physical_network= network.get_meta('physical_network', None)) self.assertThat(vif_dict, matchers.DictMatches(check)) def test_validate_networks(self): self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100')] ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1]) ip['network'] = dict(test_network.fake_network, **networks[1]) ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(ip) ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0]) ip['network'] = dict(test_network.fake_network, **networks[0]) ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(ip) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_valid_fixed_ipv6(self): self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address') requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '2001:db9:0:1::10')] ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2]) ip['network'] = dict(test_network.fake_network, **networks[1]) ip['instance_uuid'] = None db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(ip) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_reserved(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) nets = self.network.create_networks(context_admin, 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertEqual(1, len(nets)) network = nets[0] self.assertEqual(4, db.network_count_reserved_ips(context_admin, network['id'])) def test_validate_reserved_start_end(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) nets = self.network.create_networks(context_admin, 'fake', '192.168.0.0/24', False, 1, 256, dhcp_server='192.168.0.11', allowed_start='192.168.0.10', allowed_end='192.168.0.245') self.assertEqual(1, len(nets)) network = nets[0] # gateway defaults to beginning of allowed_start self.assertEqual('192.168.0.10', network['gateway']) # vpn_server doesn't conflict with dhcp_start self.assertEqual('192.168.0.12', network['vpn_private_address']) # dhcp_start doesn't conflict with dhcp_server self.assertEqual('192.168.0.13', network['dhcp_start']) # NOTE(vish): 10 from the beginning, 10 from the end, and # 1 for the gateway, 1 for the dhcp server, # 1 for the vpn server self.assertEqual(23, db.network_count_reserved_ips(context_admin, network['id'])) def test_validate_reserved_start_out_of_range(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.AddressOutOfRange, self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 256, allowed_start='192.168.1.10') def test_validate_reserved_end_invalid(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.InvalidAddress, self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 256, allowed_end='invalid') def test_validate_cidr_invalid(self): context_admin = context.RequestContext('testuser', 
'testproject', is_admin=True) self.assertRaises(exception.InvalidCidr, self.network.create_networks, context_admin, 'fake', 'invalid', False, 1, 256) def test_validate_non_int_size(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.assertRaises(exception.InvalidIntValue, self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 'invalid') def test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) def test_validate_networks_empty_requested_networks(self): requested_networks = [] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_invalid_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100.1'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100.1')] self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_empty_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')] self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_none_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid') def test_get_instance_nw_info(self, get): def make_ip(index): vif = objects.VirtualInterface(uuid=index, address=index) network = objects.Network(uuid=index, bridge=index, label=index, project_id=index, injected=False, netmask='255.255.255.0', dns1=None, dns2=None, cidr_v6=None, gateway_v6=None, broadcast_v6=None, netmask_v6=None, rxtx_base=None, gateway='192.168.%s.1' % index, dhcp_server='192.168.%s.1' % index, broadcast='192.168.%s.255' % index, cidr='192.168.%s.0/24' % index) return objects.FixedIP(virtual_interface=vif, network=network, floating_ips=objects.FloatingIPList(), address='192.168.%s.2' % index) objs = [make_ip(index) for index in ('3', '1', '2')] get.return_value = objects.FixedIPList(objects=objs) nw_info = self.network.get_instance_nw_info(self.context, None, None, None) for i, vif in enumerate(nw_info): self.assertEqual(objs[i].network.bridge, vif['network']['bridge']) @mock.patch.object(objects.Network, 'get_by_id') def test_add_fixed_ip_instance_using_id_without_vpn(self, get_by_id): # Allocate a fixed ip from a network and assign it to an instance. # Network is given by network id. network_id = networks[0]['id'] with mock.patch.object(self.network, 'allocate_fixed_ip') as allocate_fixed_ip: self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, network_id) # Assert that we fetched the network by id, not uuid get_by_id.assert_called_once_with(self.context, network_id, project_only='allow_none') # Assert that we called allocate_fixed_ip for the given network and # instance. We should not have requested a specific address from the # network. allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID, get_by_id.return_value, address=None) @mock.patch.object(objects.Network, 'get_by_uuid') def test_add_fixed_ip_instance_using_uuid_without_vpn(self, get_by_uuid): # Allocate a fixed ip from a network and assign it to an instance. 
# Network is given by network uuid. network_uuid = networks[0]['uuid'] with mock.patch.object(self.network, 'allocate_fixed_ip') as allocate_fixed_ip,\ mock.patch.object(self.context, 'elevated', return_value=mock.sentinel.elevated): self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, network_uuid) # Assert that we fetched the network by uuid, not id, and with elevated # context get_by_uuid.assert_called_once_with(mock.sentinel.elevated, network_uuid) # Assert that we called allocate_fixed_ip for the given network and # instance. We should not have requested a specific address from the # network. allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID, get_by_uuid.return_value, address=None) def test_mini_dns_driver(self): zone1 = "example.org" zone2 = "example.com" driver = self.network.instance_dns_manager driver.create_entry("hostone", "10.0.0.1", "A", zone1) driver.create_entry("hosttwo", "10.0.0.2", "A", zone1) driver.create_entry("hostthree", "10.0.0.3", "A", zone1) driver.create_entry("hostfour", "10.0.0.4", "A", zone1) driver.create_entry("hostfive", "10.0.0.5", "A", zone2) driver.delete_entry("hostone", zone1) driver.modify_address("hostfour", "10.0.0.1", zone1) driver.modify_address("hostthree", "10.0.0.1", zone1) names = driver.get_entries_by_address("10.0.0.1", zone1) self.assertEqual(2, len(names)) self.assertIn('hostthree', names) self.assertIn('hostfour', names) names = driver.get_entries_by_address("10.0.0.5", zone2) self.assertEqual(1, len(names)) self.assertIn('hostfive', names) addresses = driver.get_entries_by_name("hosttwo", zone1) self.assertEqual(1, len(addresses)) self.assertIn('10.0.0.2', addresses) self.assertRaises(exception.InvalidInput, driver.create_entry, "hostname", "10.10.10.10", "invalidtype", zone1) def test_mini_dns_driver_with_mixed_case(self): zone1 = "example.org" driver = self.network.instance_dns_manager driver.create_entry("HostTen", "10.0.0.10", "A", zone1) addresses = driver.get_entries_by_address("10.0.0.10", zone1) self.assertEqual(1, len(addresses)) for n in addresses: driver.delete_entry(n, zone1) addresses = driver.get_entries_by_address("10.0.0.10", zone1) self.assertEqual(0, len(addresses)) def test_allocate_fixed_ip_instance_dns(self): # Test DNS entries are created when allocating a fixed IP. # Allocate a fixed IP to an instance. Ensure that dns entries have been # created for the instance's name and uuid. network = network_obj.Network._from_db_object( self.context, network_obj.Network(), test_network.fake_network) network.save = mock.MagicMock() # Create a minimal instance object instance_params = { 'display_name': HOST, 'security_groups': [] } instance = fake_instance.fake_instance_obj( context.RequestContext('ignore', 'ignore'), expected_attrs=instance_params.keys(), **instance_params) instance.save = mock.MagicMock() # We don't specify a specific address, so we should get a FixedIP # automatically allocated from the pool. Fix its value here. 
fip = objects.FixedIP(address='192.168.0.101') fip.save = mock.MagicMock() with mock.patch.object(objects.Instance, 'get_by_uuid', return_value=instance),\ mock.patch.object(objects.FixedIP, 'associate_pool', return_value=fip): self.network.allocate_fixed_ip(self.context, FAKEUUID, network) instance_manager = self.network.instance_dns_manager expected_addresses = ['192.168.0.101'] # Assert that we have a correct entry by instance display name addresses = instance_manager.get_entries_by_name(HOST, self.network.instance_dns_domain) self.assertEqual(expected_addresses, addresses) # Assert that we have a correct entry by instance uuid addresses = instance_manager.get_entries_by_name(FAKEUUID, self.network.instance_dns_domain) self.assertEqual(expected_addresses, addresses) def test_allocate_floating_ip(self): self.assertIsNone(self.network.allocate_floating_ip(self.context, 1, None)) def test_deallocate_floating_ip(self): self.assertIsNone(self.network.deallocate_floating_ip(self.context, 1, None)) def test_associate_floating_ip(self): self.assertIsNone(self.network.associate_floating_ip(self.context, None, None)) def test_disassociate_floating_ip(self): self.assertIsNone(self.network.disassociate_floating_ip(self.context, None, None)) def test_get_networks_by_uuids_ordering(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn( [dict(test_network.fake_network, **net) for net in networks]) self.mox.ReplayAll() res = self.network._get_networks_by_uuids(self.context, requested_networks) self.assertEqual(1, res[0]['id']) self.assertEqual(0, res[1]['id']) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.quotas.Quotas.reserve') @mock.patch('nova.objects.quotas.ids_from_instance') def test_allocate_calculates_quota_auth(self, util_method, reserve, get_by_uuid): inst = objects.Instance() inst['uuid'] = 'nosuch' get_by_uuid.return_value = inst usages = {'fixed_ips': {'in_use': 10, 'reserved': 1}} reserve.side_effect = exception.OverQuota(overs='testing', quotas={'fixed_ips': 10}, usages=usages) util_method.return_value = ('foo', 'bar') self.assertRaises(exception.FixedIpLimitExceeded, self.network.allocate_fixed_ip, self.context, 123, {'uuid': 'nosuch'}) util_method.assert_called_once_with(self.context, inst) @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address') @mock.patch('nova.objects.quotas.Quotas.reserve') @mock.patch('nova.objects.quotas.ids_from_instance') def test_deallocate_calculates_quota_auth(self, util_method, reserve, get_by_address): inst = objects.Instance(uuid='fake-uuid') fip = objects.FixedIP(instance_uuid='fake-uuid', virtual_interface_id=1) get_by_address.return_value = fip util_method.return_value = ('foo', 'bar') # This will fail right after the reserve call when it tries # to look up the fake instance we created above self.assertRaises(exception.InstanceNotFound, self.network.deallocate_fixed_ip, self.context, '1.2.3.4', instance=inst) util_method.assert_called_once_with(self.context, inst) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') def test_allocate_fixed_ip_passes_string_address(self, mock_associate, mock_get): mock_associate.side_effect = test.TestingException instance = objects.Instance(context=self.context) instance.create() mock_get.return_value = instance 
self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}, address=netaddr.IPAddress('1.2.3.4')) mock_associate.assert_called_once_with(self.context, '1.2.3.4', instance.uuid, 1, vif_id=1) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.virtual_interface.VirtualInterface' '.get_by_instance_and_network') @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') @mock.patch('nova.objects.fixed_ip.FixedIP.save') def test_allocate_fixed_ip_cleanup(self, mock_fixedip_save, mock_fixedip_associate, mock_fixedip_disassociate, mock_vif_get, mock_instance_get): address = netaddr.IPAddress('1.2.3.4') fip = objects.FixedIP(instance_uuid='fake-uuid', address=address, virtual_interface_id=1) mock_fixedip_associate.return_value = fip instance = objects.Instance(context=self.context) instance.create() mock_instance_get.return_value = instance mock_vif_get.return_value = vif_obj.VirtualInterface( instance_uuid='fake-uuid', id=1) with test.nested( mock.patch.object(self.network, '_setup_network_on_host'), mock.patch.object(self.network, 'instance_dns_manager'), mock.patch.object(self.network, '_do_trigger_security_group_members_refresh_for_instance') ) as (mock_setup_network, mock_dns_manager, mock_ignored): mock_setup_network.side_effect = test.TestingException self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}, address=address) mock_dns_manager.delete_entry.assert_has_calls([ mock.call(instance.display_name, ''), mock.call(instance.uuid, '') ]) mock_fixedip_disassociate.assert_called_once_with(self.context) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.virtual_interface.VirtualInterface' '.get_by_instance_and_network') @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate') @mock.patch('nova.objects.fixed_ip.FixedIP.associate_pool') @mock.patch('nova.network.manager.NetworkManager._add_virtual_interface') def test_allocate_fixed_ip_create_new_vifs(self, mock_add, mock_fixedip_associate, mock_fixedip_disassociate, mock_vif_get, mock_instance_get): address = netaddr.IPAddress('1.2.3.4') fip = objects.FixedIP(instance_uuid='fake-uuid', address=address, virtual_interface_id=1000) net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'} instance = objects.Instance(context=self.context) instance.create() vif = objects.VirtualInterface(context, id=1000, address='00:00:00:00:00:00', instance_uuid=instance.uuid, network_id=net['id'], uuid='nosuch') mock_fixedip_associate.return_value = fip mock_add.return_value = vif mock_instance_get.return_value = instance mock_vif_get.return_value = None with test.nested( mock.patch.object(self.network, '_setup_network_on_host'), mock.patch.object(self.network, 'instance_dns_manager'), mock.patch.object(self.network, '_do_trigger_security_group_members_refresh_for_instance') ) as (mock_setup_network, mock_dns_manager, mock_ignored): self.network.allocate_fixed_ip(self.context, instance['uuid'], net) mock_add.assert_called_once_with(self.context, instance['uuid'], net['id']) self.assertEqual(fip.virtual_interface_id, vif.id) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network', return_value=None) @mock.patch('nova.objects.fixed_ip.FixedIP') def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip, 
mock_get_vif, mock_instance_get): # Tests that we don't try to do anything with fixed IPs if # _add_virtual_interface fails. instance = fake_instance.fake_instance_obj(self.context) mock_instance_get.return_value = instance network = {'cidr': '24', 'id': 1, 'uuid': '398399b3-f696-4859-8695-a6560e14cb02'} vif_error = exception.VirtualInterfaceMacAddressException() # mock out quotas because we don't care in this test with mock.patch.object(self.network, 'quotas_cls', objects.QuotasNoOp): with mock.patch.object(self.network, '_add_virtual_interface', side_effect=vif_error): self.assertRaises( exception.VirtualInterfaceMacAddressException, self.network.allocate_fixed_ip, self.context, '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0', network) self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls)) class FlatDHCPNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(FlatDHCPNetworkTestCase, self).setUp() self.useFixture(test.SampleNetworks()) self.flags(use_local=True, group='conductor') self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) self.context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id') @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host') @mock.patch('nova.network.linux_net.iptables_manager._apply') def test_init_host_iptables_defer_apply(self, iptable_apply, floating_get_by_host, fixed_get_by_id): def get_by_id(context, fixed_ip_id, **kwargs): net = objects.Network(bridge='testbridge', cidr='192.168.1.0/24') if fixed_ip_id == 1: return objects.FixedIP(address='192.168.1.4', network=net) elif fixed_ip_id == 2: return objects.FixedIP(address='192.168.1.5', network=net) def fake_apply(): fake_apply.count += 1 fake_apply.count = 0 ctxt = context.RequestContext('testuser', 'testproject', is_admin=True) float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1) float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2) float1._context = ctxt float2._context = ctxt iptable_apply.side_effect = fake_apply floating_get_by_host.return_value = [float1, float2] fixed_get_by_id.side_effect = get_by_id self.network.init_host() self.assertEqual(1, fake_apply.count) class VlanNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(VlanNetworkTestCase, self).setUp() self.useFixture(test.SampleNetworks()) self.network = network_manager.VlanManager(host=HOST) self.network.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=False) self.context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) def test_quota_driver_type(self): self.assertEqual(objects.QuotasNoOp, self.network.quotas_cls) def test_vpn_allocate_fixed_ip(self): self.mox.StubOutWithMock(db, 'fixed_ip_associate') self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') fixed = dict(test_fixed_ip.fake_fixed_ip, address='192.168.0.1') db.fixed_ip_associate(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), network_id=mox.IgnoreArg(), reserved=True, virtual_interface_id=vifs[0]['id'] ).AndReturn(fixed) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 
'security_groups'] ).AndReturn(fake_inst(display_name=HOST, uuid=FAKEUUID)) self.mox.ReplayAll() network = objects.Network._from_db_object( self.context, objects.Network(), dict(test_network.fake_network, **networks[0])) network.vpn_private_address = '192.168.0.2' self.network.allocate_fixed_ip(self.context, FAKEUUID, network, vpn=True) def test_allocate_fixed_ip(self): self.stubs.Set(self.network, '_do_trigger_security_group_members_refresh_for_instance', lambda *a, **kw: None) self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') fixed = dict(test_fixed_ip.fake_fixed_ip, address='192.168.0.1') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), instance_uuid=mox.IgnoreArg(), host=None, virtual_interface_id=vifs[0]['id'] ).AndReturn(fixed) db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, uuid=FAKEUUID)) self.mox.ReplayAll() network = objects.Network._from_db_object( self.context, objects.Network(), dict(test_network.fake_network, **networks[0])) network.vpn_private_address = '192.168.0.2' self.network.allocate_fixed_ip(self.context, FAKEUUID, network) @mock.patch('nova.network.manager.VlanManager._setup_network_on_host') @mock.patch('nova.network.manager.VlanManager.' '_validate_instance_zone_for_dns_domain') @mock.patch('nova.network.manager.VlanManager.' '_do_trigger_security_group_members_refresh_for_instance') @mock.patch('nova.network.manager.VlanManager._add_virtual_interface') @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') @mock.patch('nova.objects.VirtualInterface.get_by_instance_and_network') def test_allocate_fixed_ip_return_none(self, mock_get, mock_associate, mock_get_uuid, mock_add, mock_trigger, mock_validate, mock_setup): net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'} fip = objects.FixedIP(instance_uuid='fake-uuid', address=netaddr.IPAddress('1.2.3.4'), virtual_interface_id=1) instance = objects.Instance(context=self.context) instance.create() vif = objects.VirtualInterface(self.context, id=1000, address='00:00:00:00:00:00', instance_uuid=instance.uuid, network_id=net['id'], uuid='nosuch') mock_associate.return_value = fip mock_add.return_value = vif mock_get.return_value = None mock_get_uuid.return_value = instance mock_validate.return_value = False self.network.allocate_fixed_ip(self.context_admin, instance.uuid, net) mock_add.assert_called_once_with(self.context_admin, instance.uuid, net['id']) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') def test_allocate_fixed_ip_passes_string_address(self, mock_associate, mock_get): mock_associate.side_effect = test.TestingException instance = objects.Instance(context=self.context) instance.create() mock_get.return_value = instance self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}, address=netaddr.IPAddress('1.2.3.4')) mock_associate.assert_called_once_with(self.context, '1.2.3.4', instance.uuid, 1, vif_id=1) @mock.patch('nova.objects.instance.Instance.get_by_uuid') @mock.patch('nova.objects.fixed_ip.FixedIP.associate') def 
test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate, mock_get): mock_associate.side_effect = test.TestingException instance = objects.Instance(context=self.context) instance.create() mock_get.return_value = instance self.assertRaises(test.TestingException, self.network.allocate_fixed_ip, self.context, instance.uuid, {'cidr': '24', 'id': 1, 'uuid': 'nosuch', 'vpn_private_address': netaddr.IPAddress('1.2.3.4') }, vpn=1) mock_associate.assert_called_once_with(self.context, '1.2.3.4', instance.uuid, 1, reserved=True, vif_id=1) @mock.patch.object(db, 'virtual_interface_get_by_instance_and_network', return_value=None) @mock.patch('nova.objects.fixed_ip.FixedIP') def test_allocate_fixed_ip_add_vif_fails(self, mock_fixedip, mock_get_vif): # Tests that we don't try to do anything with fixed IPs if # _add_virtual_interface fails. vif_error = exception.VirtualInterfaceMacAddressException() with mock.patch.object(self.network, '_add_virtual_interface', side_effect=vif_error): self.assertRaises(exception.VirtualInterfaceMacAddressException, self.network.allocate_fixed_ip, self.context, '9d2ee1e3-ffad-4e5f-81ff-c96dd97b0ee0', networks[0]) self.assertFalse(mock_fixedip.called, str(mock_fixedip.mock_calls)) def test_create_networks_too_big(self): self.assertRaises(ValueError, self.network.create_networks, None, num_networks=4094, vlan_start=1) def test_create_networks_too_many(self): self.assertRaises(ValueError, self.network.create_networks, None, num_networks=100, vlan_start=1, cidr='192.168.0.1/24', network_size=100) def test_duplicate_vlan_raises(self): # VLAN 100 is already used and we force the network to be created # in that vlan (vlan=100). self.assertRaises(exception.DuplicateVlan, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan=100, cidr='192.168.0.1/24', network_size=100) def test_vlan_start(self): # VLAN 100 and 101 are used, so this network should be created in 102 networks = self.network.create_networks( self.context_admin, label="fake", num_networks=1, vlan_start=100, cidr='192.168.3.1/24', network_size=100) self.assertEqual(102, networks[0]["vlan"]) def test_vlan_start_multiple(self): # VLAN 100 and 101 are used, so these networks should be created in 102 # and 103 networks = self.network.create_networks( self.context_admin, label="fake", num_networks=2, vlan_start=100, cidr='192.168.3.1/24', network_size=100) self.assertEqual(102, networks[0]["vlan"]) self.assertEqual(103, networks[1]["vlan"]) def test_vlan_start_used(self): # VLAN 100 and 101 are used, but vlan_start=99.
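        # Editor's note (an assumption stated for clarity, not a quote of the
        # VlanManager code): the expected value of 102 below only makes sense
        # if the allocator never reuses or goes below an already-used VLAN,
        # i.e. roughly
        #
        #     start = max(vlan_start, max(used_vlans) + 1)
        #     # max(99, 101 + 1) == 102
        #
        # so a vlan_start of 99 is silently bumped past the in-use VLANs.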
networks = self.network.create_networks( self.context_admin, label="fake", num_networks=1, vlan_start=99, cidr='192.168.3.1/24', network_size=100) self.assertEqual(102, networks[0]["vlan"]) def test_vlan_parameter(self): # vlan parameter could not be greater than 4094 exc = self.assertRaises(ValueError, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan=4095, cidr='192.168.0.1/24') error_msg = 'The vlan number cannot be greater than 4094' self.assertIn(error_msg, six.text_type(exc)) # vlan parameter could not be less than 1 exc = self.assertRaises(ValueError, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan=0, cidr='192.168.0.1/24') error_msg = 'The vlan number cannot be less than 1' self.assertIn(error_msg, six.text_type(exc)) def test_vlan_be_integer(self): # vlan must be an integer exc = self.assertRaises(ValueError, self.network.create_networks, self.context_admin, label="fake", num_networks=1, vlan='fake', cidr='192.168.0.1/24') error_msg = 'vlan must be an integer' self.assertIn(error_msg, six.text_type(exc)) def test_vlan_multiple_without_dhcp_server(self): networks = self.network.create_networks( self.context_admin, label="fake", num_networks=2, vlan_start=100, cidr='192.168.3.1/24', network_size=100) self.assertEqual("192.168.3.1", networks[0]["dhcp_server"]) self.assertEqual("192.168.3.129", networks[1]["dhcp_server"]) def test_vlan_multiple_with_dhcp_server(self): networks = self.network.create_networks( self.context_admin, label="fake", num_networks=2, vlan_start=100, cidr='192.168.3.1/24', network_size=100, dhcp_server='192.168.3.1') self.assertEqual("192.168.3.1", networks[0]["dhcp_server"]) self.assertEqual("192.168.3.1", networks[1]["dhcp_server"]) def test_validate_networks(self): self.mox.StubOutWithMock(db, "fixed_ip_get_by_address") requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100')] db_fixed1 = dict(test_fixed_ip.fake_fixed_ip, network_id=networks[1]['id'], network=dict(test_network.fake_network, **networks[1]), instance_uuid=None) db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(db_fixed1) db_fixed2 = dict(test_fixed_ip.fake_fixed_ip, network_id=networks[0]['id'], network=dict(test_network.fake_network, **networks[0]), instance_uuid=None) db.fixed_ip_get_by_address(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=mox.IgnoreArg() ).AndReturn(db_fixed2) self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) def test_validate_networks_empty_requested_networks(self): requested_networks = [] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_validate_networks_invalid_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '192.168.1.100.1'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '192.168.0.100.1')] self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def test_validate_networks_empty_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')] self.mox.ReplayAll() self.assertRaises(exception.FixedIpInvalid, self.network.validate_networks, self.context, requested_networks) def 
test_validate_networks_none_fixed_ip(self): requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)] self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) def test_floating_ip_owned_by_project(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) # raises because floating_ip project_id is None floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=None) self.assertRaises(exception.Forbidden, self.network._floating_ip_owned_by_project, ctxt, floating_ip) # raises because floating_ip project_id is not equal to ctxt project_id floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=ctxt.project_id + '1') self.assertRaises(exception.Forbidden, self.network._floating_ip_owned_by_project, ctxt, floating_ip) # does not raise (floating ip is owned by ctxt project) floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=ctxt.project_id) self.network._floating_ip_owned_by_project(ctxt, floating_ip) ctxt = context.RequestContext(None, None, is_admin=True) # does not raise (ctxt is admin) floating_ip = objects.FloatingIP(address='10.0.0.1', project_id=None) self.network._floating_ip_owned_by_project(ctxt, floating_ip) # does not raise (ctxt is admin) floating_ip = objects.FloatingIP(address='10.0.0.1', project_id='testproject') self.network._floating_ip_owned_by_project(ctxt, floating_ip) def test_allocate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) self.stubs.Set(self.network, '_floating_ip_pool_exists', lambda _x, _y: True) def fake_allocate_address(*args, **kwargs): return {'address': '10.0.0.1', 'project_id': ctxt.project_id} self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake_allocate_address) self.network.allocate_floating_ip(ctxt, ctxt.project_id) @mock.patch('nova.quota.QUOTAS.reserve') @mock.patch('nova.quota.QUOTAS.commit') def test_deallocate_floating_ip(self, mock_commit, mock_reserve): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip) def fake2(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', fixed_ip_id=1) def fake3(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', fixed_ip_id=None, project_id=ctxt.project_id) self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1) self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # this time should raise because floating ip is associated to fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.assertRaises(exception.FloatingIpAssociated, self.network.deallocate_floating_ip, ctxt, mox.IgnoreArg()) mock_reserve.return_value = 'reserve' # this time should not raise self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) self.network.deallocate_floating_ip(ctxt, ctxt.project_id) mock_commit.assert_called_once_with(ctxt, 'reserve', project_id='testproject') @mock.patch('nova.db.fixed_ip_get') def test_associate_floating_ip(self, fixed_get): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', network=test_network.fake_network) # floating ip that's already associated def fake2(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=1) # 
floating ip that isn't associated def fake3(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=None) # fixed ip with remote host def fake4(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=123) def fake4_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='jibberjabber') # fixed ip with local host def fake5(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=1234) def fake5_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='testhost') def fake6(ctxt, method, **kwargs): self.local = False def fake7(*args, **kwargs): self.local = True def fake8(*args, **kwargs): raise processutils.ProcessExecutionError('', 'Cannot find device "em0"\n') def fake9(*args, **kwargs): raise test.TestingException() # raises because interface doesn't exist self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate', fake1) self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1) self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8) self.assertRaises(exception.NoFloatingIpInterface, self.network._associate_floating_ip, ctxt, '1.2.3.4', '1.2.3.5', mox.IgnoreArg(), mox.IgnoreArg()) self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # raises because floating_ip is already associated to a fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.stubs.Set(self.network, 'disassociate_floating_ip', fake9) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', instance_uuid='fake_uuid', network=test_network.fake_network) # doesn't raise because we exit early if the address is the same self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4') # raises because we call disassociate which is mocked self.assertRaises(test.TestingException, self.network.associate_floating_ip, ctxt, mox.IgnoreArg(), 'new') self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) # does not raise and makes call remotely self.local = True self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4) self.stubs.Set(self.network.db, 'network_get', fake4_network) self.stubs.Set(self.network.network_rpcapi.client, 'prepare', lambda **kw: self.network.network_rpcapi.client) self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6) self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), mox.IgnoreArg()) self.assertFalse(self.local) # does not raise and makes call locally self.local = False self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5) self.stubs.Set(self.network.db, 'network_get', fake5_network) self.stubs.Set(self.network, '_associate_floating_ip', fake7) self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), mox.IgnoreArg()) self.assertTrue(self.local) def test_add_floating_ip_nat_before_bind(self): # Tried to verify order with documented mox record/verify # functionality, but it doesn't seem to work since I can't make it # fail. I'm using stubs and a flag for now, but if this mox feature # can be made to work, it would be a better way to test this. 
# # self.mox.StubOutWithMock(self.network.driver, # 'ensure_floating_forward') # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip') # # self.network.driver.ensure_floating_forward(mox.IgnoreArg(), # mox.IgnoreArg(), # mox.IgnoreArg(), # mox.IgnoreArg()) # self.network.driver.bind_floating_ip(mox.IgnoreArg(), # mox.IgnoreArg()) # self.mox.ReplayAll() nat_called = [False] def fake_nat(*args, **kwargs): nat_called[0] = True def fake_bind(*args, **kwargs): self.assertTrue(nat_called[0]) self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake_nat) self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind) self.network.l3driver.add_floating_ip('fakefloat', 'fakefixed', 'fakeiface', 'fakenet') @mock.patch('nova.db.floating_ip_get_all_by_host') @mock.patch('nova.db.fixed_ip_get') def _test_floating_ip_init_host(self, fixed_get, floating_get, public_interface, expected_arg): floating_get.return_value = [ dict(test_floating_ip.fake_floating_ip, interface='foo', address='1.2.3.4'), dict(test_floating_ip.fake_floating_ip, interface='fakeiface', address='1.2.3.5', fixed_ip_id=1), dict(test_floating_ip.fake_floating_ip, interface='bar', address='1.2.3.6', fixed_ip_id=2), ] def fixed_ip_get(_context, fixed_ip_id, get_network): if fixed_ip_id == 1: return dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', network=test_network.fake_network) raise exception.FixedIpNotFound(id=fixed_ip_id) fixed_get.side_effect = fixed_ip_get self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip') self.flags(public_interface=public_interface) self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'), netaddr.IPAddress('1.2.3.4'), expected_arg, mox.IsA(objects.Network)) self.mox.ReplayAll() self.network.init_host_floating_ips() self.mox.UnsetStubs() self.mox.VerifyAll() def test_floating_ip_init_host_without_public_interface(self): self._test_floating_ip_init_host(public_interface=False, expected_arg='fakeiface') def test_floating_ip_init_host_with_public_interface(self): self._test_floating_ip_init_host(public_interface='fooiface', expected_arg='fooiface') def test_disassociate_floating_ip(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): pass # floating ip that isn't associated def fake2(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=None) # floating ip that is associated def fake3(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=1, project_id=ctxt.project_id) # fixed ip with remote host def fake4(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=123) def fake4_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='jibberjabber') # fixed ip with local host def fake5(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, address='10.0.0.1', pool='nova', instance_uuid=FAKEUUID, interface='eth0', network_id=1234) def fake5_network(*args, **kwargs): return dict(test_network.fake_network, multi_host=False, host='testhost') def fake6(ctxt, method, **kwargs): self.local = False def fake7(*args, **kwargs): self.local = True def fake8(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', pool='nova', interface='eth0', fixed_ip_id=1, auto_assigned=True, project_id=ctxt.project_id) 
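# NOTE(editor): An alternative to the stub-and-flag approach used in
# test_add_floating_ip_nat_before_bind above is to attach both driver
# methods to one mock parent and compare parent.mock_calls, which records
# call order. This is only a hedged sketch with made-up names
# (fake_driver, add_floating_ip_sketch); it is not how this test suite
# verifies ordering.
import mock  # the same mock library these tests use; unittest.mock works too

def add_floating_ip_sketch(driver, floating, fixed, iface, net):
    # Behaviour under test: NAT forwarding must be configured before the
    # floating address is bound to the interface.
    driver.ensure_floating_forward(floating, fixed, iface, net)
    driver.bind_floating_ip(floating, iface)

fake_driver = mock.Mock()
add_floating_ip_sketch(fake_driver, 'fakefloat', 'fakefixed',
                       'fakeiface', 'fakenet')
# mock_calls preserves ordering across all child mocks of fake_driver.
assert fake_driver.mock_calls == [
    mock.call.ensure_floating_forward('fakefloat', 'fakefixed',
                                      'fakeiface', 'fakenet'),
    mock.call.bind_floating_ip('fakefloat', 'fakeiface'),
]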
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1) # raises because floating_ip is not associated to a fixed_ip self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2) self.assertRaises(exception.FloatingIpNotAssociated, self.network.disassociate_floating_ip, ctxt, mox.IgnoreArg()) self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) # does not raise and makes call remotely self.local = True self.stubs.Set(self.network.db, 'fixed_ip_get', fake4) self.stubs.Set(self.network.db, 'network_get', fake4_network) self.stubs.Set(self.network.network_rpcapi.client, 'prepare', lambda **kw: self.network.network_rpcapi.client) self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6) self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg()) self.assertFalse(self.local) # does not raise and makes call locally self.local = False self.stubs.Set(self.network.db, 'fixed_ip_get', fake5) self.stubs.Set(self.network.db, 'network_get', fake5_network) self.stubs.Set(self.network, '_disassociate_floating_ip', fake7) self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg()) self.assertTrue(self.local) # raises because auto_assigned floating IP cannot be disassociated self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8) self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP, self.network.disassociate_floating_ip, ctxt, mox.IgnoreArg()) def test_add_fixed_ip_instance_without_vpn_requested_networks(self): self.stubs.Set(self.network, '_do_trigger_security_group_members_refresh_for_instance', lambda *a, **kw: None) self.mox.StubOutWithMock(db, 'network_get') self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) fixed = dict(test_fixed_ip.fake_fixed_ip, address='192.168.0.101') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), instance_uuid=mox.IgnoreArg(), host=None, virtual_interface_id=vifs[0]['id'] ).AndReturn(fixed) db.network_get(mox.IgnoreArg(), mox.IgnoreArg(), project_only=mox.IgnoreArg() ).AndReturn(dict(test_network.fake_network, **networks[0])) db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, uuid=FAKEUUID)) self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST, networks[0]['id']) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') def test_ip_association_and_allocation_of_other_project(self, net_get, fixed_get): """Makes sure that we cannot deallocaate or disassociate a public IP of other project. 
""" net_get.return_value = dict(test_network.fake_network, **networks[1]) context1 = context.RequestContext('user', 'project1') context2 = context.RequestContext('user', 'project2') float_ip = db.floating_ip_create(context1.elevated(), {'address': '1.2.3.4', 'project_id': context1.project_id}) float_addr = float_ip['address'] instance = db.instance_create(context1, {'project_id': 'project1'}) fix_addr = db.fixed_ip_associate_pool(context1.elevated(), 1, instance['uuid']).address fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) # Associate the IP with non-admin user context self.assertRaises(exception.Forbidden, self.network.associate_floating_ip, context2, float_addr, fix_addr) # Deallocate address from other project self.assertRaises(exception.Forbidden, self.network.deallocate_floating_ip, context2, float_addr) # Now Associates the address to the actual project self.network.associate_floating_ip(context1, float_addr, fix_addr) # Now try dis-associating from other project self.assertRaises(exception.Forbidden, self.network.disassociate_floating_ip, context2, float_addr) # Clean up the ip addresses self.network.disassociate_floating_ip(context1, float_addr) self.network.deallocate_floating_ip(context1, float_addr) self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') db.floating_ip_destroy(context1.elevated(), float_addr) db.fixed_ip_disassociate(context1.elevated(), fix_addr) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed(self, fixed_update, net_get, fixed_get): """Verify that release is called properly. Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return """ net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return vifs[0] self.stub_out('nova.db.virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, instance_uuid=instance.uuid, allocated=True, virtual_interface_id=3, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) self.mox.StubOutWithMock(linux_net, 'release_dhcp') linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address, 'DE:AD:BE:EF:00:00') self.mox.ReplayAll() self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False}) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def _deallocate_fixed_with_dhcp(self, mock_dev_exists, fixed_update, net_get, fixed_get): net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return vifs[0] with test.nested( mock.patch.object(db, 'virtual_interface_get', vif_get), mock.patch.object( utils, 'execute', side_effect=processutils.ProcessExecutionError()), ) as (_vif_get, _execute): context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = 
dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, instance_uuid=instance.uuid, allocated=True, virtual_interface_id=3, network=dict( test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False}) mock_dev_exists.assert_called_once_with(networks[1]['bridge']) if mock_dev_exists.return_value: _execute.assert_called_once_with('dhcp_release', networks[1]['bridge'], fix_addr.address, 'DE:AD:BE:EF:00:00', run_as_root=True) @mock.patch('nova.network.linux_net.device_exists', return_value=True) def test_deallocate_fixed_with_dhcp(self, mock_dev_exists): self._deallocate_fixed_with_dhcp(mock_dev_exists) @mock.patch('nova.network.linux_net.device_exists', return_value=False) def test_deallocate_fixed_without_dhcp(self, mock_dev_exists): self._deallocate_fixed_with_dhcp(mock_dev_exists) def test_deallocate_fixed_deleted(self): # Verify doesn't deallocate deleted fixed_ip from deleted network. def teardown_network_on_host(_context, network): if network['id'] == 0: raise test.TestingException() self.stubs.Set(self.network, '_teardown_network_on_host', teardown_network_on_host) context1 = context.RequestContext('user', 'project1') elevated = context1.elevated() instance = db.instance_create(context1, {'project_id': 'project1'}) network = db.network_create_safe(elevated, networks[0]) _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fix_addr = _fix_addr.address db.fixed_ip_update(elevated, fix_addr, {'deleted': 1}) elevated.read_deleted = 'yes' delfixed = db.fixed_ip_get_by_address(elevated, fix_addr) values = {'address': fix_addr, 'network_id': network.id, 'instance_uuid': delfixed['instance_uuid']} db.fixed_ip_create(elevated, values) elevated.read_deleted = 'no' elevated.read_deleted = 'yes' deallocate = self.network.deallocate_fixed_ip self.assertRaises(test.TestingException, deallocate, context1, fix_addr, 'fake') @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get): """Verify that deallocate doesn't raise when no vif is returned. Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return """ net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return None self.stub_out('nova.db.virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, allocated=True, virtual_interface_id=3, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) fixed_update.return_value = fixed_get.return_value self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False}) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get): # Verify IP is not deallocated if the security group refresh fails. 
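# NOTE(editor): test_deallocate_fixed_with_dhcp / _without_dhcp above drive
# a single helper with device_exists patched to True or False and then
# check whether dhcp_release was executed. Below is a minimal standalone
# sketch of that guard; release_if_device_exists, device_exists and
# execute are placeholder names, not nova's real call chain.
import mock

def release_if_device_exists(device_exists, execute, dev, address, mac):
    # Only shell out to dhcp_release when the bridge device is present.
    if device_exists(dev):
        execute('dhcp_release', dev, address, mac, run_as_root=True)

executor = mock.Mock()
release_if_device_exists(mock.Mock(return_value=False), executor,
                         'br100', '192.168.1.5', 'DE:AD:BE:EF:00:00')
assert not executor.called
release_if_device_exists(mock.Mock(return_value=True), executor,
                         'br100', '192.168.1.5', 'DE:AD:BE:EF:00:00')
executor.assert_called_once_with('dhcp_release', 'br100', '192.168.1.5',
                                 'DE:AD:BE:EF:00:00', run_as_root=True)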
net_get.return_value = dict(test_network.fake_network, **networks[1]) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = objects.FixedIP.associate_pool(elevated, 1, instance['uuid']) def fake_refresh(instance_uuid): raise test.TestingException() self.stubs.Set(self.network, '_do_trigger_security_group_members_refresh_for_instance', fake_refresh) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, allocated=True, virtual_interface_id=3, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.assertRaises(test.TestingException, self.network.deallocate_fixed_ip, context1, str(fix_addr.address), 'fake') self.assertFalse(fixed_update.called) def test_get_networks_by_uuids_ordering(self): self.mox.StubOutWithMock(db, 'network_get_all_by_uuids') requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'] db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn( [dict(test_network.fake_network, **net) for net in networks]) self.mox.ReplayAll() res = self.network._get_networks_by_uuids(self.context, requested_networks) self.assertEqual(1, res[0]['id']) self.assertEqual(0, res[1]['id']) @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id') @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host') @mock.patch('nova.network.linux_net.iptables_manager._apply') def test_init_host_iptables_defer_apply(self, iptable_apply, floating_get_by_host, fixed_get_by_id): def get_by_id(context, fixed_ip_id, **kwargs): net = objects.Network(bridge='testbridge', cidr='192.168.1.0/24') if fixed_ip_id == 1: return objects.FixedIP(address='192.168.1.4', network=net) elif fixed_ip_id == 2: return objects.FixedIP(address='192.168.1.5', network=net) def fake_apply(): fake_apply.count += 1 fake_apply.count = 0 ctxt = context.RequestContext('testuser', 'testproject', is_admin=True) float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1) float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2) float1._context = ctxt float2._context = ctxt iptable_apply.side_effect = fake_apply floating_get_by_host.return_value = [float1, float2] fixed_get_by_id.side_effect = get_by_id self.network.init_host() self.assertEqual(1, fake_apply.count) class _TestDomainObject(object): def __init__(self, **kwargs): for k, v in six.iteritems(kwargs): self.__setattr__(k, v) class CommonNetworkTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(CommonNetworkTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.flags(ipv6_backend='rfc2462') ipv6.reset_backend() def test_validate_instance_zone_for_dns_domain(self): domain = 'example.com' az = 'test_az' domains = { domain: _TestDomainObject( domain=domain, availability_zone=az)} def dnsdomain_get(context, instance_domain): return domains.get(instance_domain) self.stub_out('nova.db.dnsdomain_get', dnsdomain_get) fake_instance = {'uuid': FAKEUUID, 'availability_zone': az} manager = network_manager.NetworkManager() res = manager._validate_instance_zone_for_dns_domain(self.context, fake_instance) self.assertTrue(res) def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None, extra_reserved=None, bottom_reserved=0, top_reserved=0): return None def test_get_instance_nw_info_client_exceptions(self): manager = network_manager.NetworkManager() self.mox.StubOutWithMock(manager.db, 
'fixed_ip_get_by_instance') manager.db.fixed_ip_get_by_instance( self.context, FAKEUUID).AndRaise(exception.InstanceNotFound( instance_id=FAKEUUID)) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, manager.get_instance_nw_info, self.context, FAKEUUID, 'fake_rxtx_factor', HOST) @mock.patch('nova.db.instance_get') @mock.patch('nova.db.fixed_ip_get_by_instance') def test_deallocate_for_instance_passes_host_info(self, fixed_get, instance_get): manager = fake_network.FakeNetworkManager() db = manager.db instance_get.return_value = fake_inst(uuid='ignoreduuid') db.virtual_interface_delete_by_instance = lambda _x, _y: None ctx = context.RequestContext('igonre', 'igonre') fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', network_id=123)] manager.deallocate_for_instance( ctx, instance=objects.Instance._from_db_object(self.context, objects.Instance(), instance_get.return_value)) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host') ], manager.deallocate_fixed_ip_calls) @mock.patch('nova.db.fixed_ip_get_by_instance') def test_deallocate_for_instance_passes_host_info_with_update_dns_entries( self, fixed_get): self.flags(update_dns_entries=True) manager = fake_network.FakeNetworkManager() db = manager.db db.virtual_interface_delete_by_instance = lambda _x, _y: None ctx = context.RequestContext('igonre', 'igonre') fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip, address='1.2.3.4', network_id=123)] with mock.patch.object(manager.network_rpcapi, 'update_dns') as mock_update_dns: manager.deallocate_for_instance( ctx, instance=fake_instance.fake_instance_obj(ctx)) mock_update_dns.assert_called_once_with(ctx, ['123']) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host') ], manager.deallocate_fixed_ip_calls) def test_deallocate_for_instance_with_requested_networks(self): manager = fake_network.FakeNetworkManager() db = manager.db db.virtual_interface_delete_by_instance = mock.Mock() ctx = context.RequestContext('igonre', 'igonre') requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in [('123', '1.2.3.4'), ('123', '4.3.2.1'), ('123', None)]]) manager.deallocate_for_instance( ctx, instance=fake_instance.fake_instance_obj(ctx), requested_networks=requested_networks) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host') ], manager.deallocate_fixed_ip_calls) def test_deallocate_for_instance_with_update_dns_entries(self): self.flags(update_dns_entries=True) manager = fake_network.FakeNetworkManager() db = manager.db db.virtual_interface_delete_by_instance = mock.Mock() ctx = context.RequestContext('igonre', 'igonre') requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]]) with mock.patch.object(manager.network_rpcapi, 'update_dns') as mock_update_dns: manager.deallocate_for_instance( ctx, instance=fake_instance.fake_instance_obj(ctx), requested_networks=requested_networks) mock_update_dns.assert_called_once_with(ctx, ['123']) self.assertEqual([ (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host') ], manager.deallocate_fixed_ip_calls) @mock.patch('nova.db.fixed_ip_get_by_instance') @mock.patch('nova.db.fixed_ip_disassociate') def test_remove_fixed_ip_from_instance(self, disassociate, get): manager = fake_network.FakeNetworkManager() get.return_value = [ dict(test_fixed_ip.fake_fixed_ip, **x) for x in manager.db.fixed_ip_get_by_instance(None, FAKEUUID)] 
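# NOTE(editor): Several of the deallocate_for_instance tests above rely on
# fake_network.FakeNetworkManager recording every deallocate_fixed_ip()
# call so assertions can compare the collected (context, address, host)
# tuples. A stripped-down sketch of that recording-fake idea follows;
# RecordingManager is a made-up name, not the real fake used here.
class RecordingManager(object):
    def __init__(self):
        self.deallocate_fixed_ip_calls = []

    def deallocate_fixed_ip(self, context, address, host):
        # Remember the call instead of touching any real networking state.
        self.deallocate_fixed_ip_calls.append((context, address, host))

mgr = RecordingManager()
mgr.deallocate_fixed_ip('ctx', '1.2.3.4', 'fake-host')
assert mgr.deallocate_fixed_ip_calls == [('ctx', '1.2.3.4', 'fake-host')]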
manager.remove_fixed_ip_from_instance(self.context, FAKEUUID, HOST, '10.0.0.1') self.assertEqual('10.0.0.1', manager.deallocate_called) disassociate.assert_called_once_with(self.context, '10.0.0.1') @mock.patch('nova.db.fixed_ip_get_by_instance') def test_remove_fixed_ip_from_instance_bad_input(self, get): manager = fake_network.FakeNetworkManager() get.return_value = [] self.assertRaises(exception.FixedIpNotFoundForSpecificInstance, manager.remove_fixed_ip_from_instance, self.context, 99, HOST, 'bad input') def test_validate_cidrs(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertEqual(1, len(nets)) cidrs = [str(net['cidr']) for net in nets] self.assertIn('192.168.0.0/24', cidrs) def test_validate_cidrs_split_exact_in_half(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/24', False, 2, 128, None, None, None, None, None) self.assertEqual(2, len(nets)) cidrs = [str(net['cidr']) for net in nets] self.assertIn('192.168.0.0/25', cidrs) self.assertIn('192.168.0.128/25', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.0/24')] nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/16', False, 4, 256, None, None, None, None, None) self.assertEqual(4, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: self.assertIn(exp_cidr, cidrs) self.assertNotIn('192.168.2.0/24', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_smaller_subnet_in_use(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.9/25')] # CidrConflict: requested cidr (192.168.2.0/24) conflicts with # existing smaller cidr args = (self.context.elevated(), 'fake', '192.168.2.0/24', False, 1, 256, None, None, None, None, None) self.assertRaises(exception.CidrConflict, manager.create_networks, *args) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.0/25')] nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/16', False, 4, 256, None, None, None, None, None) self.assertEqual(4, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: self.assertIn(exp_cidr, cidrs) self.assertNotIn('192.168.2.0/24', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all): manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.2.9/29')] nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.2.0/24', False, 3, 32, None, None, None, None, None) self.assertEqual(3, len(nets)) cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] for exp_cidr in exp_cidrs: 
self.assertIn(exp_cidr, cidrs) self.assertNotIn('192.168.2.0/27', cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_split_all_in_use(self, get_all): manager = fake_network.FakeNetworkManager() in_use = [dict(test_network.fake_network, **values) for values in [{'id': 1, 'cidr': '192.168.2.9/29'}, {'id': 2, 'cidr': '192.168.2.64/26'}, {'id': 3, 'cidr': '192.168.2.128/26'}]] get_all.return_value = in_use args = (self.context.elevated(), 'fake', '192.168.2.0/24', False, 3, 64, None, None, None, None, None) # CidrConflict: Not enough subnets avail to satisfy requested num_ # networks - some subnets in requested range already # in use self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_validate_cidrs_one_in_use(self): manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None, None, None, None) # ValueError: network_size * num_networks exceeds cidr size self.assertRaises(ValueError, manager.create_networks, *args) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_already_used(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, cidr='192.168.0.0/24')] # CidrConflict: cidr already in use args = (self.context.elevated(), 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_validate_cidrs_too_many(self): manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None, None, None, None) # ValueError: Not enough subnets avail to satisfy requested # num_networks self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_partial(self): manager = fake_network.FakeNetworkManager() nets = manager.create_networks(self.context.elevated(), 'fake', '192.168.0.0/16', False, 2, 256, None, None, None, None, None) returned_cidrs = [str(net['cidr']) for net in nets] self.assertIn('192.168.0.0/24', returned_cidrs) self.assertIn('192.168.1.0/24', returned_cidrs) @mock.patch('nova.db.network_get_all') def test_validate_cidrs_conflict_existing_supernet(self, get_all): manager = fake_network.FakeNetworkManager() get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.0.0/8')] args = (self.context.elevated(), 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None, None) # CidrConflict: requested cidr (192.168.0.0/24) conflicts # with existing supernet self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_create_networks(self): cidr = '192.168.0.0/24' manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [self.context.elevated(), 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None, None, None] self.assertTrue(manager.create_networks(*args)) def test_create_networks_with_uuid(self): cidr = '192.168.0.0/24' uuid = FAKEUUID manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [self.context.elevated(), 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None, None, None] kwargs = {'uuid': uuid} nets = manager.create_networks(*args, **kwargs) self.assertEqual(1, len(nets)) net = nets[0] self.assertEqual(uuid, net['uuid']) @mock.patch('nova.db.network_get_all') def test_create_networks_cidr_already_used(self, get_all): manager = fake_network.FakeNetworkManager() 
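# NOTE(editor): The validate_cidrs tests above expect the requested
# supernet to be carved into fixed-size subnets while skipping anything
# that overlaps a network already in the database. The sketch below only
# models that expectation with netaddr; pick_free_subnets is a made-up
# helper, not the manager's real CIDR validation logic.
import netaddr

def pick_free_subnets(cidr, prefixlen, in_use, count):
    used = [netaddr.IPNetwork(u) for u in in_use]
    picked = []
    for subnet in netaddr.IPNetwork(cidr).subnet(prefixlen):
        # Skip any candidate that contains, or is contained by, an
        # existing network.
        if any(subnet in u or u in subnet for u in used):
            continue
        picked.append(str(subnet))
        if len(picked) == count:
            break
    return picked

# pick_free_subnets('192.168.0.0/16', 24, ['192.168.2.0/25'], 4)
# -> ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24']
# which matches exp_cidrs in test_validate_cidrs_split_smaller_cidr_in_use.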
get_all.return_value = [dict(test_network.fake_network, id=1, cidr='192.168.0.0/24')] args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256, 'fd00::/48', None, None, None, None, None] self.assertRaises(exception.CidrConflict, manager.create_networks, *args) def test_create_networks_many(self): cidr = '192.168.0.0/16' manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [self.context.elevated(), 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, None, None, None] self.assertTrue(manager.create_networks(*args)) @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ips_by_virtual_interface') def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get): manager = fake_network.FakeNetworkManager(self.stubs) fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') network_get.return_value = dict(test_network.fake_network, **manager.db.network_get(None, 1)) # Greedy get eveything res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '.*'}) self.assertEqual(len(_vifs), len(res)) # Doesn't exist res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '10.0.0.1'}) self.assertFalse(res) # Get instance 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '172.16.0.2'}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) # Get instance 2 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '173.16.0.2'}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid']) # Get instance 0 and 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '172.16.0.*'}) self.assertTrue(res) self.assertEqual(2, len(res)) self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid']) # Get instance 1 and 2 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip': '17..16.0.2'}) self.assertTrue(res) self.assertEqual(2, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid']) @mock.patch('nova.db.network_get') def test_get_instance_uuids_by_ipv6_regex(self, network_get): manager = fake_network.FakeNetworkManager(self.stubs) _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') def _network_get(context, network_id, **args): return dict(test_network.fake_network, **manager.db.network_get(context, network_id)) network_get.side_effect = _network_get # Greedy get eveything res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*'}) self.assertEqual(len(_vifs), len(res)) # Doesn't exist res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*1034.*'}) self.assertFalse(res) # Get instance 1 res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '2001:.*2'}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) # Get instance 2 ip6 = '2001:db8:69:1f:dead:beff:feff:ef03' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': ip6}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid']) # Get instance 0 and 1 res = 
manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': '.*ef0[1,2]'}) self.assertTrue(res) self.assertEqual(2, len(res)) self.assertEqual(_vifs[0]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[1]['instance_uuid'], res[1]['instance_uuid']) # Get instance 1 and 2 ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'ip6': ip6}) self.assertTrue(res) self.assertEqual(2, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) self.assertEqual(_vifs[2]['instance_uuid'], res[1]['instance_uuid']) @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ips_by_virtual_interface') def test_get_instance_uuids_by_ip(self, fixed_get, network_get): manager = fake_network.FakeNetworkManager(self.stubs) fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface _vifs = manager.db.virtual_interface_get_all(None) fake_context = context.RequestContext('user', 'project') network_get.return_value = dict(test_network.fake_network, **manager.db.network_get(None, 1)) # No regex for you! res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': '.*'}) self.assertFalse(res) # Doesn't exist ip = '10.0.0.1' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertFalse(res) # Get instance 1 ip = '172.16.0.2' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[1]['instance_uuid'], res[0]['instance_uuid']) # Get instance 2 ip = '173.16.0.2' res = manager.get_instance_uuids_by_ip_filter(fake_context, {'fixed_ip': ip}) self.assertTrue(res) self.assertEqual(1, len(res)) self.assertEqual(_vifs[2]['instance_uuid'], res[0]['instance_uuid']) @mock.patch('nova.db.network_get_by_uuid') def test_get_network(self, get): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get.return_value = dict(test_network.fake_network, **networks[0]) uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' network = manager.get_network(fake_context, uuid) self.assertEqual(uuid, network['uuid']) @mock.patch('nova.db.network_get_by_uuid') def test_get_network_not_found(self, get): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo') uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.assertRaises(exception.NetworkNotFound, manager.get_network, fake_context, uuid) @mock.patch('nova.db.network_get_all') def test_get_all_networks(self, get_all): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get_all.return_value = [dict(test_network.fake_network, **net) for net in networks] output = manager.get_all_networks(fake_context) self.assertEqual(2, len(networks)) self.assertEqual('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', output[0]['uuid']) self.assertEqual('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', output[1]['uuid']) @mock.patch('nova.db.network_get_by_uuid') @mock.patch('nova.db.network_disassociate') def test_disassociate_network(self, disassociate, get): manager = fake_network.FakeNetworkManager() disassociate.return_value = True fake_context = context.RequestContext('user', 'project') get.return_value = dict(test_network.fake_network, **networks[0]) uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' manager.disassociate_network(fake_context, uuid) @mock.patch('nova.db.network_get_by_uuid') def 
test_disassociate_network_not_found(self, get): manager = fake_network.FakeNetworkManager() fake_context = context.RequestContext('user', 'project') get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake') uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.assertRaises(exception.NetworkNotFound, manager.disassociate_network, fake_context, uuid) def _test_init_host_dynamic_fixed_range(self, net_manager): self.flags(fake_network=True, routing_source_ip='172.16.0.1', metadata_host='172.16.0.1', public_interface='eth1', dmz_cidr=['10.0.3.0/24']) binary_name = linux_net.get_binary_name() # Stub out calls we don't want to really run, mock the db self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None) self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips', lambda *args: None) self.stubs.Set(net_manager.l3driver, 'initialize_gateway', lambda *args: None) self.mox.StubOutWithMock(db, 'network_get_all_by_host') fake_networks = [dict(test_network.fake_network, **n) for n in networks] db.network_get_all_by_host(mox.IgnoreArg(), mox.IgnoreArg() ).MultipleTimes().AndReturn(fake_networks) self.mox.ReplayAll() net_manager.init_host() # Get the iptables rules that got created current_lines = [] new_lines = linux_net.iptables_manager._modify_rules(current_lines, linux_net.iptables_manager.ipv4['nat'], table_name='nat') expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, networks[0]['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, networks[0]['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, networks[0]['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! ' '--ctstate DNAT -j ACCEPT' % (binary_name, networks[0]['cidr'], networks[0]['cidr']), '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, networks[1]['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, networks[1]['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, networks[1]['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! 
' '--ctstate DNAT -j ACCEPT' % (binary_name, networks[1]['cidr'], networks[1]['cidr'])] # Compare the expected rules against the actual ones for line in expected_lines: self.assertIn(line, new_lines) # Add an additional network and ensure the rules get configured new_network = {'id': 2, 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc', 'label': 'test2', 'injected': False, 'multi_host': False, 'cidr': '192.168.2.0/24', 'cidr_v6': '2001:dba::/64', 'gateway_v6': '2001:dba::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.2.1', 'dhcp_server': '192.168.2.1', 'broadcast': '192.168.2.255', 'dns1': '192.168.2.1', 'dns2': '192.168.2.2', 'vlan': None, 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.2.2', 'vpn_public_port': '22', 'vpn_private_address': '10.0.0.2'} new_network_obj = objects.Network._from_db_object( self.context, objects.Network(), dict(test_network.fake_network, **new_network)) ctxt = context.get_admin_context() net_manager._setup_network_on_host(ctxt, new_network_obj) # Get the new iptables rules that got created from adding a new network current_lines = [] new_lines = linux_net.iptables_manager._modify_rules(current_lines, linux_net.iptables_manager.ipv4['nat'], table_name='nat') # Add the new expected rules to the old ones expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 ' '-j SNAT --to-source %s -o %s' % (binary_name, new_network['cidr'], CONF.routing_source_ip, CONF.public_interface), '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' % (binary_name, new_network['cidr'], CONF.metadata_host), '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' % (binary_name, new_network['cidr'], CONF.dmz_cidr[0]), '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ' '! --ctstate DNAT -j ACCEPT' % (binary_name, new_network['cidr'], new_network['cidr'])] # Compare the expected rules (with new network) against the actual ones for line in expected_lines: self.assertIn(line, new_lines) def test_flatdhcpmanager_dynamic_fixed_range(self): """Test FlatDHCPManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db # Test new behavior: # CONF.fixed_range is not set, defaults to None # Determine networks to NAT based on lookup self._test_init_host_dynamic_fixed_range(self.network) def test_vlanmanager_dynamic_fixed_range(self): """Test VlanManager NAT rules for fixed_range.""" # Set the network manager self.network = network_manager.VlanManager(host=HOST) self.network.db = db # Test new behavior: # CONF.fixed_range is not set, defaults to None # Determine networks to NAT based on lookup self._test_init_host_dynamic_fixed_range(self.network) @mock.patch('nova.objects.quotas.Quotas.rollback') @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address') @mock.patch('nova.network.manager.NetworkManager.' 
'_do_trigger_security_group_members_refresh_for_instance') def test_fixed_ip_cleanup_rollback(self, fake_trig, fixed_get, rollback): manager = network_manager.NetworkManager() fake_trig.side_effect = test.TestingException self.assertRaises(test.TestingException, manager.deallocate_fixed_ip, self.context, 'fake', 'fake', instance=fake_inst(uuid='ignoreduuid')) rollback.assert_called_once_with() def test_fixed_cidr_out_of_range(self): manager = network_manager.NetworkManager() ctxt = context.get_admin_context() self.assertRaises(exception.AddressOutOfRange, manager.create_networks, ctxt, label="fake", cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25') class TestRPCFixedManager(network_manager.RPCAllocateFixedIP, network_manager.NetworkManager): """Dummy manager that implements RPCAllocateFixedIP.""" class RPCAllocateTestCase(test.NoDBTestCase): """Tests nova.network.manager.RPCAllocateFixedIP.""" def setUp(self): super(RPCAllocateTestCase, self).setUp() self.rpc_fixed = TestRPCFixedManager() self.context = context.RequestContext('fake', 'fake') def test_rpc_allocate(self): """Test to verify bug 855030 doesn't resurface. Makes sure _rpc_allocate_fixed_ip returns a value so the call returns properly and the greenpool completes. """ address = '10.10.10.10' def fake_allocate(*args, **kwargs): return address def fake_network_get(*args, **kwargs): return test_network.fake_network self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate) self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get) rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context, 'fake_instance', 'fake_network') self.assertEqual(address, rval) class TestFloatingIPManager(floating_ips.FloatingIP, network_manager.NetworkManager): """Dummy manager that implements FloatingIP.""" class AllocateTestCase(test.TestCase): REQUIRES_LOCKING = True def setUp(self): super(AllocateTestCase, self).setUp() dns = 'nova.network.noop_dns_driver.NoopDNSDriver' self.flags(instance_dns_manager=dns) self.useFixture(test.SampleNetworks()) self.network = network_manager.VlanManager(host=HOST) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) self.user_context = context.RequestContext('testuser', 'testproject') def test_allocate_for_instance(self): address = "10.10.10.10" self.flags(auto_assign_floating_ip=True) db.floating_ip_create(self.context, {'address': address, 'pool': 'nova'}) inst = objects.Instance(context=self.context) inst.host = HOST inst.display_name = HOST inst.instance_type_id = 1 inst.uuid = FAKEUUID inst.create() networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': HOST}) project_id = self.user_context.project_id nw_info = self.network.allocate_for_instance(self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=None) self.assertEqual(1, len(nw_info)) fixed_ip = nw_info.fixed_ips()[0]['address'] self.assertTrue(netutils.is_valid_ipv4(fixed_ip)) self.network.deallocate_for_instance(self.context, instance=inst) def test_allocate_for_instance_illegal_network(self): networks = db.network_get_all(self.context) requested_networks = [] for network in networks: # set all networks to other projects db.network_update(self.context, network['id'], {'host': HOST, 'project_id': 'otherid'}) requested_networks.append((network['uuid'], None)) # set the first network to our project
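# NOTE(editor): _test_init_host_dynamic_fixed_range above builds its
# expected iptables NAT rules with plain string formatting. The helper
# below is just a hedged restatement of the expected SNAT line so the rule
# layout is easier to read; snat_rule is a made-up name, and
# 'nova-network' is only an assumed value for get_binary_name().
def snat_rule(binary_name, cidr, routing_source_ip, public_interface):
    return ('[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
            '-j SNAT --to-source %s -o %s'
            % (binary_name, cidr, routing_source_ip, public_interface))

# With the flags set in that test (routing_source_ip='172.16.0.1',
# public_interface='eth1'):
# snat_rule('nova-network', '192.168.0.0/24', '172.16.0.1', 'eth1')
# -> '[0:0] -A nova-network-snat -s 192.168.0.0/24 -d 0.0.0.0/0 '
#    '-j SNAT --to-source 172.16.0.1 -o eth1'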
db.network_update(self.context, networks[0]['id'], {'project_id': self.user_context.project_id}) inst = objects.Instance(context=self.context) inst.host = HOST inst.display_name = HOST inst.instance_type_id = 1 inst.uuid = FAKEUUID inst.create() self.assertRaises(exception.NetworkNotFoundForProject, self.network.allocate_for_instance, self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=self.context.project_id, macs=None, requested_networks=requested_networks) def test_allocate_for_instance_with_mac(self): available_macs = set(['ca:fe:de:ad:be:ef']) inst = db.instance_create(self.context, {'host': HOST, 'display_name': HOST, 'instance_type_id': 1}) networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': HOST}) project_id = self.context.project_id nw_info = self.network.allocate_for_instance(self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=available_macs) assigned_macs = [vif['address'] for vif in nw_info] self.assertEqual(1, len(assigned_macs)) self.assertEqual(available_macs.pop(), assigned_macs[0]) self.network.deallocate_for_instance(self.context, instance_id=inst['id'], host=self.network.host, project_id=project_id) def test_allocate_for_instance_not_enough_macs(self): available_macs = set() inst = db.instance_create(self.context, {'host': HOST, 'display_name': HOST, 'instance_type_id': 1}) networks = db.network_get_all(self.context) for network in networks: db.network_update(self.context, network['id'], {'host': self.network.host}) project_id = self.context.project_id self.assertRaises(exception.VirtualInterfaceCreateException, self.network.allocate_for_instance, self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=available_macs) class FloatingIPTestCase(test.TestCase): """Tests nova.network.manager.FloatingIP.""" REQUIRES_LOCKING = True def setUp(self): super(FloatingIPTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = TestFloatingIPManager() self.network.db = db self.project_id = 'testproject' self.context = context.RequestContext('testuser', self.project_id, is_admin=False) @mock.patch('nova.db.fixed_ip_get') @mock.patch('nova.db.network_get') @mock.patch('nova.db.instance_get_by_uuid') @mock.patch('nova.db.service_get_by_host_and_binary') @mock.patch('nova.db.floating_ip_get_by_address') def test_disassociate_floating_ip_multi_host_calls(self, floating_get, service_get, inst_get, net_get, fixed_get): floating_ip = dict(test_floating_ip.fake_floating_ip, fixed_ip_id=12) fixed_ip = dict(test_fixed_ip.fake_fixed_ip, network_id=None, instance_uuid='instance-uuid') network = dict(test_network.fake_network, multi_host=True) instance = dict(fake_instance.fake_db_instance(host='some-other-host')) ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) self.stubs.Set(self.network, '_floating_ip_owned_by_project', lambda _x, _y: True) floating_get.return_value = floating_ip fixed_get.return_value = fixed_ip net_get.return_value = network inst_get.return_value = instance service_get.return_value = test_service.fake_service self.stubs.Set(self.network.servicegroup_api, 'service_is_up', lambda _x: True) self.mox.StubOutWithMock( self.network.network_rpcapi, 
'_disassociate_floating_ip') self.network.network_rpcapi._disassociate_floating_ip( ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid') self.mox.ReplayAll() self.network.disassociate_floating_ip(ctxt, 'fl_ip', True) @mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.instance_get_by_uuid') @mock.patch('nova.db.floating_ip_get_by_address') def test_associate_floating_ip_multi_host_calls(self, floating_get, inst_get, net_get, fixed_get): floating_ip = dict(test_floating_ip.fake_floating_ip, fixed_ip_id=None) fixed_ip = dict(test_fixed_ip.fake_fixed_ip, network_id=None, instance_uuid='instance-uuid') network = dict(test_network.fake_network, multi_host=True) instance = dict(fake_instance.fake_db_instance(host='some-other-host')) ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) self.stubs.Set(self.network, '_floating_ip_owned_by_project', lambda _x, _y: True) floating_get.return_value = floating_ip fixed_get.return_value = fixed_ip net_get.return_value = network inst_get.return_value = instance self.mox.StubOutWithMock( self.network.network_rpcapi, '_associate_floating_ip') self.network.network_rpcapi._associate_floating_ip( ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid') self.mox.ReplayAll() self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True) def test_double_deallocation(self): instance_ref = db.instance_create(self.context, {"project_id": self.project_id}) # Run it twice to make it fault if it does not handle # instances without fixed networks # If this fails in either, it does not handle having no addresses self.network.deallocate_for_instance(self.context, instance_id=instance_ref['id']) self.network.deallocate_for_instance(self.context, instance_id=instance_ref['id']) def test_deallocate_floating_ip_quota_rollback(self): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, address='10.0.0.1', fixed_ip_id=None, project_id=ctxt.project_id) self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake) self.mox.StubOutWithMock(db, 'floating_ip_deallocate') self.mox.StubOutWithMock(self.network, '_floating_ip_owned_by_project') self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') self.mox.StubOutWithMock(quota.QUOTAS, 'rollback') quota.QUOTAS.reserve(self.context, floating_ips=-1, project_id='testproject').AndReturn('fake-rsv') self.network._floating_ip_owned_by_project(self.context, mox.IgnoreArg()) db.floating_ip_deallocate(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) quota.QUOTAS.rollback(self.context, 'fake-rsv', project_id='testproject') self.mox.ReplayAll() self.network.deallocate_floating_ip(self.context, '10.0.0.1') def test_deallocation_deleted_instance(self): self.stubs.Set(self.network, '_teardown_network_on_host', lambda *args, **kwargs: None) instance = objects.Instance(context=self.context) instance.project_id = self.project_id instance.deleted = True instance.create() network = db.network_create_safe(self.context.elevated(), { 'project_id': self.project_id, 'host': CONF.host, 'label': 'foo'}) fixed = db.fixed_ip_create(self.context, {'allocated': True, 'instance_uuid': instance.uuid, 'address': '10.1.1.1', 'network_id': network['id']}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'instance_uuid': instance.uuid, 'fixed_ip_id': fixed['id'], 'project_id': self.project_id}) self.network.deallocate_for_instance(self.context, 
instance=instance) def test_deallocation_duplicate_floating_ip(self): self.stubs.Set(self.network, '_teardown_network_on_host', lambda *args, **kwargs: None) instance = objects.Instance(context=self.context) instance.project_id = self.project_id instance.create() network = db.network_create_safe(self.context.elevated(), { 'project_id': self.project_id, 'host': CONF.host, 'label': 'foo'}) fixed = db.fixed_ip_create(self.context, {'allocated': True, 'instance_uuid': instance.uuid, 'address': '10.1.1.1', 'network_id': network['id']}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'deleted': True}) db.floating_ip_create(self.context, { 'address': '10.10.10.10', 'instance_uuid': instance.uuid, 'fixed_ip_id': fixed['id'], 'project_id': self.project_id}) self.network.deallocate_for_instance(self.context, instance=instance) @mock.patch('nova.db.fixed_ip_get') @mock.patch('nova.db.floating_ip_get_by_address') @mock.patch('nova.db.floating_ip_update') def test_migrate_instance_start(self, floating_update, floating_get, fixed_get): called = {'count': 0} def fake_floating_ip_get_by_address(context, address): return dict(test_floating_ip.fake_floating_ip, address=address, fixed_ip_id=0) def fake_is_stale_floating_ip_address(context, floating_ip): return str(floating_ip.address) == '172.24.4.23' floating_get.side_effect = fake_floating_ip_get_by_address fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, instance_uuid='fake_uuid', address='10.0.0.2', network=test_network.fake_network) floating_update.return_value = fake_floating_ip_get_by_address( None, '1.2.3.4') def fake_remove_floating_ip(floating_addr, fixed_addr, interface, network): called['count'] += 1 def fake_clean_conntrack(fixed_ip): if not str(fixed_ip) == "10.0.0.2": raise exception.FixedIpInvalid(address=fixed_ip) self.stubs.Set(self.network, '_is_stale_floating_ip_address', fake_is_stale_floating_ip_address) self.stubs.Set(self.network.l3driver, 'remove_floating_ip', fake_remove_floating_ip) self.stubs.Set(self.network.driver, 'clean_conntrack', fake_clean_conntrack) self.mox.ReplayAll() addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25'] self.network.migrate_instance_start(self.context, instance_uuid=FAKEUUID, floating_addresses=addresses, rxtx_factor=3, project_id=self.project_id, source='fake_source', dest='fake_dest') self.assertEqual(2, called['count']) @mock.patch('nova.db.fixed_ip_get') @mock.patch('nova.db.floating_ip_update') def test_migrate_instance_finish(self, floating_update, fixed_get): called = {'count': 0} def fake_floating_ip_get_by_address(context, address): return dict(test_floating_ip.fake_floating_ip, address=address, fixed_ip_id=0) def fake_is_stale_floating_ip_address(context, floating_ip): return str(floating_ip.address) == '172.24.4.23' fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, instance_uuid='fake_uuid', address='10.0.0.2', network=test_network.fake_network) floating_update.return_value = fake_floating_ip_get_by_address( None, '1.2.3.4') def fake_add_floating_ip(floating_addr, fixed_addr, interface, network): called['count'] += 1 self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake_floating_ip_get_by_address) self.stubs.Set(self.network, '_is_stale_floating_ip_address', fake_is_stale_floating_ip_address) self.stubs.Set(self.network.l3driver, 'add_floating_ip', fake_add_floating_ip) self.mox.ReplayAll() addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25'] self.network.migrate_instance_finish(self.context, instance_uuid=FAKEUUID, 
floating_addresses=addresses, host='fake_dest', rxtx_factor=3, project_id=self.project_id, source='fake_source') self.assertEqual(2, called['count']) def test_floating_dns_create_conflict(self): zone = "example.org" address1 = "10.10.10.11" name1 = "foo" self.network.add_dns_entry(self.context, address1, name1, "A", zone) self.assertRaises(exception.FloatingIpDNSExists, self.network.add_dns_entry, self.context, address1, name1, "A", zone) def test_floating_create_and_get(self): zone = "example.org" address1 = "10.10.10.11" name1 = "foo" name2 = "bar" entries = self.network.get_dns_entries_by_address(self.context, address1, zone) self.assertFalse(entries) self.network.add_dns_entry(self.context, address1, name1, "A", zone) self.network.add_dns_entry(self.context, address1, name2, "A", zone) entries = self.network.get_dns_entries_by_address(self.context, address1, zone) self.assertEqual(2, len(entries)) self.assertEqual(name1, entries[0]) self.assertEqual(name2, entries[1]) entries = self.network.get_dns_entries_by_name(self.context, name1, zone) self.assertEqual(1, len(entries)) self.assertEqual(address1, entries[0]) def test_floating_dns_delete(self): zone = "example.org" address1 = "10.10.10.11" name1 = "foo" name2 = "bar" self.network.add_dns_entry(self.context, address1, name1, "A", zone) self.network.add_dns_entry(self.context, address1, name2, "A", zone) self.network.delete_dns_entry(self.context, name1, zone) entries = self.network.get_dns_entries_by_address(self.context, address1, zone) self.assertEqual(1, len(entries)) self.assertEqual(name2, entries[0]) self.assertRaises(exception.NotFound, self.network.delete_dns_entry, self.context, name1, zone) def test_floating_dns_domains_public(self): domain1 = "example.org" domain2 = "example.com" address1 = '10.10.10.10' entryname = 'testentry' self.network.create_public_dns_domain(self.context, domain1, 'testproject') self.network.create_public_dns_domain(self.context, domain2, 'fakeproject') domains = self.network.get_dns_domains(self.context) self.assertEqual(2, len(domains)) self.assertEqual(domain1, domains[0]['domain']) self.assertEqual(domain2, domains[1]['domain']) self.assertEqual('testproject', domains[0]['project']) self.assertEqual('fakeproject', domains[1]['project']) self.network.add_dns_entry(self.context, address1, entryname, 'A', domain1) entries = self.network.get_dns_entries_by_name(self.context, entryname, domain1) self.assertEqual(1, len(entries)) self.assertEqual(address1, entries[0]) self.network.delete_dns_domain(self.context, domain1) self.network.delete_dns_domain(self.context, domain2) # Verify that deleting the domain deleted the associated entry entries = self.network.get_dns_entries_by_name(self.context, entryname, domain1) self.assertFalse(entries) def test_delete_all_by_ip(self): domain1 = "example.org" domain2 = "example.com" address = "10.10.10.10" name1 = "foo" name2 = "bar" def fake_domains(context): return [{'domain': 'example.org', 'scope': 'public'}, {'domain': 'example.com', 'scope': 'public'}, {'domain': 'test.example.org', 'scope': 'public'}] self.stubs.Set(self.network, 'get_dns_domains', fake_domains) context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) self.network.create_public_dns_domain(context_admin, domain1, 'testproject') self.network.create_public_dns_domain(context_admin, domain2, 'fakeproject') domains = self.network.get_dns_domains(self.context) for domain in domains: self.network.add_dns_entry(self.context, address, name1, "A", domain['domain']) 
self.network.add_dns_entry(self.context, address, name2, "A", domain['domain']) entries = self.network.get_dns_entries_by_address(self.context, address, domain['domain']) self.assertEqual(2, len(entries)) self.network._delete_all_entries_for_ip(self.context, address) for domain in domains: entries = self.network.get_dns_entries_by_address(self.context, address, domain['domain']) self.assertFalse(entries) self.network.delete_dns_domain(context_admin, domain1) self.network.delete_dns_domain(context_admin, domain2) def test_mac_conflicts(self): # Make sure MAC collisions are retried. self.flags(create_unique_mac_address_attempts=3) ctxt = context.RequestContext('testuser', 'testproject', is_admin=True) macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa'] # Create a VIF with aa:aa:aa:aa:aa:aa crash_test_dummy_vif = { 'address': macs[1], 'instance_uuid': 'fake_uuid', 'network_id': 123, 'uuid': 'fake_uuid', } self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif) # Hand out a collision first, then a legit MAC def fake_gen_mac(): return macs.pop() self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac) # SQLite doesn't seem to honor the uniqueness constraint on the # address column, so fake the collision-avoidance here def fake_vif_save(vif, session=None): if vif.address == crash_test_dummy_vif['address']: raise db_exc.DBError("If you're smart, you'll retry!") # NOTE(russellb) The VirtualInterface object requires an ID to be # set, and we expect it to get set automatically when we do the # save. vif.id = 1 self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save) # Attempt to add another and make sure that both MACs are consumed # by the retry loop self.network._add_virtual_interface(ctxt, 'fake_uuid', 123) self.assertEqual([], macs) def test_deallocate_client_exceptions(self): # Ensure that FloatingIpNotFoundForAddress is wrapped. self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address') self.network.db.floating_ip_get_by_address( self.context, '1.2.3.4').AndRaise( exception.FloatingIpNotFoundForAddress(address='fake')) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, self.network.deallocate_floating_ip, self.context, '1.2.3.4') def test_associate_client_exceptions(self): # Ensure that FloatingIpNotFoundForAddress is wrapped. self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address') self.network.db.floating_ip_get_by_address( self.context, '1.2.3.4').AndRaise( exception.FloatingIpNotFoundForAddress(address='fake')) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, self.network.associate_floating_ip, self.context, '1.2.3.4', '10.0.0.1') def test_disassociate_client_exceptions(self): # Ensure that FloatingIpNotFoundForAddress is wrapped. self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address') self.network.db.floating_ip_get_by_address( self.context, '1.2.3.4').AndRaise( exception.FloatingIpNotFoundForAddress(address='fake')) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, self.network.disassociate_floating_ip, self.context, '1.2.3.4') def test_get_floating_ip_client_exceptions(self): # Ensure that FloatingIpNotFoundForAddress is wrapped. 
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get') self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise( exception.FloatingIpNotFound(id='fake')) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, self.network.get_floating_ip, self.context, 'fake-id') def _test_associate_floating_ip_failure(self, stdout, expected_exception): def _fake_catchall(*args, **kwargs): return dict(test_fixed_ip.fake_fixed_ip, network=test_network.fake_network) def _fake_add_floating_ip(*args, **kwargs): raise processutils.ProcessExecutionError(stdout) self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate', _fake_catchall) self.stubs.Set(self.network.db, 'floating_ip_disassociate', _fake_catchall) self.stubs.Set(self.network.l3driver, 'add_floating_ip', _fake_add_floating_ip) self.assertRaises(expected_exception, self.network._associate_floating_ip, self.context, '1.2.3.4', '1.2.3.5', '', '') def test_associate_floating_ip_failure(self): self._test_associate_floating_ip_failure(None, processutils.ProcessExecutionError) def test_associate_floating_ip_failure_interface_not_found(self): self._test_associate_floating_ip_failure('Cannot find device', exception.NoFloatingIpInterface) @mock.patch('nova.objects.FloatingIP.get_by_address') def test_get_floating_ip_by_address(self, mock_get): mock_get.return_value = mock.sentinel.floating self.assertEqual(mock.sentinel.floating, self.network.get_floating_ip_by_address( self.context, mock.sentinel.address)) mock_get.assert_called_once_with(self.context, mock.sentinel.address) @mock.patch('nova.objects.FloatingIPList.get_by_project') def test_get_floating_ips_by_project(self, mock_get): mock_get.return_value = mock.sentinel.floatings self.assertEqual(mock.sentinel.floatings, self.network.get_floating_ips_by_project( self.context)) mock_get.assert_called_once_with(self.context, self.context.project_id) @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address') def test_get_floating_ips_by_fixed_address(self, mock_get): mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'), objects.FloatingIP(address='5.6.7.8')] self.assertEqual(['1.2.3.4', '5.6.7.8'], self.network.get_floating_ips_by_fixed_address( self.context, mock.sentinel.address)) mock_get.assert_called_once_with(self.context, mock.sentinel.address) @mock.patch('nova.db.floating_ip_get_pools') def test_floating_ip_pool_exists(self, floating_ip_get_pools): floating_ip_get_pools.return_value = [{'name': 'public'}] self.assertTrue(self.network._floating_ip_pool_exists(self.context, 'public')) @mock.patch('nova.db.floating_ip_get_pools') def test_floating_ip_pool_does_not_exist(self, floating_ip_get_pools): floating_ip_get_pools.return_value = [] self.assertFalse(self.network._floating_ip_pool_exists(self.context, 'public')) class InstanceDNSTestCase(test.TestCase): """Tests nova.network.manager instance DNS.""" def setUp(self): super(InstanceDNSTestCase, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = TestFloatingIPManager() self.network.db = db self.project_id = 'testproject' self.context = context.RequestContext('testuser', self.project_id, is_admin=False) def test_dns_domains_private(self): zone1 = 'testzone' domain1 = 'example.org' self.network.create_private_dns_domain(self.context, domain1, zone1) domains = self.network.get_dns_domains(self.context) self.assertEqual(1, len(domains)) self.assertEqual(domain1, domains[0]['domain']) self.assertEqual(zone1, domains[0]['availability_zone']) 
self.network.delete_dns_domain(self.context, domain1) domain1 = "example.org" domain2 = "example.com" class LdapDNSTestCase(test.NoDBTestCase): """Tests nova.network.ldapdns.LdapDNS.""" def setUp(self): super(LdapDNSTestCase, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'nova.network.ldapdns.ldap', fake_ldap)) dns_class = 'nova.network.ldapdns.LdapDNS' self.driver = importutils.import_object(dns_class) attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain', 'domain', 'dcobject', 'top'], 'associateddomain': ['root'], 'dc': ['root']} self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items()) self.driver.create_domain(domain1) self.driver.create_domain(domain2) def tearDown(self): self.driver.delete_domain(domain1) self.driver.delete_domain(domain2) super(LdapDNSTestCase, self).tearDown() def test_ldap_dns_domains(self): domains = self.driver.get_domains() self.assertEqual(2, len(domains)) self.assertIn(domain1, domains) self.assertIn(domain2, domains) def test_ldap_dns_create_conflict(self): address1 = "10.10.10.11" name1 = "foo" self.driver.create_entry(name1, address1, "A", domain1) self.assertRaises(exception.FloatingIpDNSExists, self.driver.create_entry, name1, address1, "A", domain1) def test_ldap_dns_create_and_get(self): address1 = "10.10.10.11" name1 = "foo" name2 = "bar" entries = self.driver.get_entries_by_address(address1, domain1) self.assertFalse(entries) self.driver.create_entry(name1, address1, "A", domain1) self.driver.create_entry(name2, address1, "A", domain1) entries = self.driver.get_entries_by_address(address1, domain1) self.assertEqual(2, len(entries)) self.assertEqual(name1, entries[0]) self.assertEqual(name2, entries[1]) entries = self.driver.get_entries_by_name(name1, domain1) self.assertEqual(1, len(entries)) self.assertEqual(address1, entries[0]) def test_ldap_dns_delete(self): address1 = "10.10.10.11" name1 = "foo" name2 = "bar" self.driver.create_entry(name1, address1, "A", domain1) self.driver.create_entry(name2, address1, "A", domain1) entries = self.driver.get_entries_by_address(address1, domain1) self.assertEqual(2, len(entries)) self.driver.delete_entry(name1, domain1) entries = self.driver.get_entries_by_address(address1, domain1) LOG.debug("entries: %s" % entries) self.assertEqual(1, len(entries)) self.assertEqual(name2, entries[0]) self.assertRaises(exception.NotFound, self.driver.delete_entry, name1, domain1) class NetworkManagerNoDBTestCase(test.NoDBTestCase): """Tests nova.network.manager.NetworkManager without a database.""" def setUp(self): super(NetworkManagerNoDBTestCase, self).setUp() self.context = context.RequestContext('fake-user', 'fake-project') self.manager = network_manager.NetworkManager() @mock.patch.object(objects.FixedIP, 'get_by_address') def test_release_fixed_ip_not_associated(self, mock_fip_get_by_addr): # Tests that the method is a no-op when the fixed IP is not associated # to an instance. fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fake_network.next_fixed_ip(1)) fip.instance_uuid = None with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address) self.assertFalse(mock_disassociate.called, str(mock_disassociate.mock_calls)) @mock.patch.object(objects.FixedIP, 'get_by_address') def test_release_fixed_ip_allocated(self, mock_fip_get_by_addr): # Tests that the fixed IP is not disassociated if it's allocated. 
fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fake_network.next_fixed_ip(1)) fip.leased = False fip.allocated = True with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address) self.assertFalse(mock_disassociate.called, str(mock_disassociate.mock_calls)) @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_address') def test_release_fixed_ip_mac_matches_associated_instance(self, mock_vif_get_by_addr, mock_fip_get_by_addr): # Tests that the fixed IP is disassociated when the mac passed to # release_fixed_ip matches the VIF which has the same instance_uuid # as the instance associated to the FixedIP object. Also tests # that the fixed IP is marked as not leased in the database if it was # currently leased. instance = fake_instance.fake_instance_obj(self.context) fip = fake_network.next_fixed_ip(1) fip['instance_uuid'] = instance.uuid fip['leased'] = True vif = fip['virtual_interface'] vif['instance_uuid'] = instance.uuid vif = objects.VirtualInterface._from_db_object( self.context, objects.VirtualInterface(), vif) fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip mock_vif_get_by_addr.return_value = vif with mock.patch.object(fip, 'save') as mock_fip_save: with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip( self.context, fip.address, vif.address) mock_fip_save.assert_called_once_with() self.assertFalse(fip.leased) mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address) mock_disassociate.assert_called_once_with() @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_address', return_value=None) def test_release_fixed_ip_vif_not_found_for_mac(self, mock_vif_get_by_addr, mock_fip_get_by_addr): # Tests that the fixed IP is disassociated when the fixed IP is marked # as deallocated and there is no VIF found in the database for the mac # passed in. fip = fake_network.next_fixed_ip(1) fip['leased'] = False mac = fip['virtual_interface']['address'] fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address, mac) mock_vif_get_by_addr.assert_called_once_with(self.context, mac) mock_disassociate.assert_called_once_with() @mock.patch.object(objects.FixedIP, 'get_by_address') def test_release_fixed_ip_no_mac(self, mock_fip_get_by_addr): # Tests that the fixed IP is disassociated when the fixed IP is # deallocated and there is no mac address passed in (like before # the network rpc api version bump to pass it in). 
fip = fake_network.next_fixed_ip(1) fip['leased'] = False fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip(self.context, fip.address) mock_disassociate.assert_called_once_with() @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_address') def test_release_fixed_ip_mac_mismatch_associated_instance(self, mock_vif_get_by_addr, mock_fip_get_by_addr): # Tests that the fixed IP is not disassociated when the VIF for the mac # passed to release_fixed_ip does not have an instance_uuid that # matches fixed_ip.instance_uuid. old_instance = fake_instance.fake_instance_obj(self.context) new_instance = fake_instance.fake_instance_obj(self.context) fip = fake_network.next_fixed_ip(1) fip['instance_uuid'] = new_instance.uuid fip['leased'] = False vif = fip['virtual_interface'] vif['instance_uuid'] = old_instance.uuid vif = objects.VirtualInterface._from_db_object( self.context, objects.VirtualInterface(), vif) fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) mock_fip_get_by_addr.return_value = fip mock_vif_get_by_addr.return_value = vif with mock.patch.object(fip, 'disassociate') as mock_disassociate: self.manager.release_fixed_ip( self.context, fip.address, vif.address) mock_vif_get_by_addr.assert_called_once_with(self.context, vif.address) self.assertFalse(mock_disassociate.called, str(mock_disassociate.mock_calls)) @mock.patch.object(objects.FixedIP, 'get_by_address') @mock.patch.object(objects.VirtualInterface, 'get_by_id') @mock.patch.object(objects.Quotas, 'reserve') def test_deallocate_fixed_ip_explicit_disassociate(self, mock_quota_reserve, mock_vif_get_by_id, mock_fip_get_by_addr): # Tests that we explicitly call FixedIP.disassociate when the fixed IP # is not leased and has an associated instance (race with dnsmasq). 
self.flags(force_dhcp_release=True) fake_inst = fake_instance.fake_instance_obj(self.context) fip = fake_network.next_fixed_ip(1) fip['instance_uuid'] = fake_inst.uuid fip['leased'] = False vif = fip['virtual_interface'] vif['instance_uuid'] = fake_inst.uuid vif = objects.VirtualInterface._from_db_object( self.context, objects.VirtualInterface(), vif) fip = objects.FixedIP._from_db_object( self.context, objects.FixedIP(), fip) fip.network = fake_network.fake_network_obj(self.context, fip.network_id) mock_fip_get_by_addr.return_value = fip mock_vif_get_by_id.return_value = vif @mock.patch.object(self.manager, '_do_trigger_security_group_members_refresh_for_instance') @mock.patch.object(self.manager, '_validate_instance_zone_for_dns_domain', return_value=False) @mock.patch.object(self.manager, '_teardown_network_on_host') @mock.patch.object(fip, 'save') @mock.patch.object(fip, 'disassociate') def do_test(mock_disassociate, mock_fip_save, mock_teardown_network_on_host, mock_validate_zone, mock_trigger_secgroup_refresh): self.assertEqual(fake_inst.uuid, fip.instance_uuid) self.assertFalse(fip.leased) self.manager.deallocate_fixed_ip( self.context, fip['address'], instance=fake_inst) mock_trigger_secgroup_refresh.assert_called_once_with( fake_inst.uuid) mock_teardown_network_on_host.assert_called_once_with(self.context, fip.network) mock_disassociate.assert_called_once_with() do_test() nova-13.1.4/nova/tests/unit/network/interfaces-override.template0000664000567000056710000000235013064447140026161 0ustar jenkinsjenkins00000000000000# Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback {% for ifc in interfaces %} auto {{ ifc.name }} iface {{ ifc.name }} inet static address {{ ifc.address }} netmask {{ ifc.netmask }} broadcast {{ ifc.broadcast }} {% if ifc.gateway %} gateway {{ ifc.gateway }} {% endif %} {% if ifc.dns %} dns-nameservers {{ ifc.dns }} {% endif %} {% for route in ifc.routes %} post-up ip route add {{ route.cidr }} via {{ route.gateway }} dev {{ ifc.name }} pre-down ip route del {{ route.cidr }} via {{ route.gateway }} dev {{ ifc.name }} {% endfor %} {% if use_ipv6 %} {% if libvirt_virt_type == 'lxc' %} {% if ifc.address_v6 %} post-up ip -6 addr add {{ ifc.address_v6 }}/{{ifc.netmask_v6 }} dev ${IFACE} {% endif %} {% if ifc.gateway_v6 %} post-up ip -6 route add default via {{ ifc.gateway_v6 }} dev ${IFACE} {% endif %} {% else %} iface {{ ifc.name }} inet6 static address {{ ifc.address_v6 }} netmask {{ ifc.netmask_v6 }} {% if ifc.gateway_v6 %} gateway {{ ifc.gateway_v6 }} {% endif %} {% endif %} {% endif %} {% endfor %} nova-13.1.4/nova/tests/unit/network/test_config.py0000664000567000056710000000604313064447152023350 0ustar jenkinsjenkins00000000000000# Copyright 2016 HPE, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import nova.network import nova.network.security_group.neutron_driver import nova.network.security_group.openstack_driver as sgapi import nova.test class FileATicket(object): def __init__(self, **kwargs): pass class NetworkAPIConfigTest(nova.test.NoDBTestCase): """Test the transition from legacy to use_neutron config options.""" def test_default(self): netapi = nova.network.API() self.assertIsInstance(netapi, nova.network.api.API) def test_use_neutron(self): self.flags(use_neutron=True) netapi = nova.network.API() self.assertIsInstance(netapi, nova.network.neutronv2.api.API) def test_dont_use_neutron(self): self.flags(use_neutron=False) netapi = nova.network.API() self.assertIsInstance(netapi, nova.network.api.API) def test_legacy_use_neutron(self): """use neutron even if config is false because of legacy option.""" self.flags(use_neutron=False) self.flags(network_api_class='nova.network.neutronv2.api.API') netapi = nova.network.API() self.assertIsInstance(netapi, nova.network.neutronv2.api.API) def test_legacy_custom_class(self): """use neutron even if config is false because of legacy option.""" self.flags(network_api_class= 'nova.tests.unit.network.test_config.FileATicket') netapi = nova.network.API() self.assertIsInstance(netapi, FileATicket) class SecurityGroupAPIConfigTest(nova.test.NoDBTestCase): def test_use_neutron(self): self.flags(use_neutron=True) driver = sgapi.get_openstack_security_group_driver() self.assertIsInstance( driver, nova.network.security_group.neutron_driver.SecurityGroupAPI) def test_sg_nova(self): self.flags(security_group_api='nova') driver = sgapi.get_openstack_security_group_driver() self.assertIsInstance( driver, nova.compute.api.SecurityGroupAPI) def test_sg_neutron(self): self.flags(security_group_api='neutron') driver = sgapi.get_openstack_security_group_driver() self.assertIsInstance( driver, nova.network.security_group.neutron_driver.SecurityGroupAPI) def test_sg_custom(self): self.flags(security_group_api= 'nova.tests.unit.network.test_config.FileATicket') driver = sgapi.get_openstack_security_group_driver() self.assertIsInstance(driver, FileATicket) nova-13.1.4/nova/tests/unit/network/__init__.py0000664000567000056710000000000013064447140022563 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/network/test_network_info.py0000664000567000056710000012510713064447140024607 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from nova import exception from nova.network import model from nova import test from nova.tests.unit import fake_network_cache_model from nova.virt import netutils class RouteTests(test.NoDBTestCase): def test_create_route_with_attrs(self): route = fake_network_cache_model.new_route() fake_network_cache_model.new_ip(dict(address='192.168.1.1')) self.assertEqual('0.0.0.0/24', route['cidr']) self.assertEqual('192.168.1.1', route['gateway']['address']) self.assertEqual('eth0', route['interface']) def test_routes_equal(self): route1 = model.Route() route2 = model.Route() self.assertEqual(route1, route2) def test_routes_not_equal(self): route1 = model.Route(cidr='1.1.1.0/24') route2 = model.Route(cidr='2.2.2.0/24') self.assertNotEqual(route1, route2) route1 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.1') route2 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.2') self.assertNotEqual(route1, route2) route1 = model.Route(cidr='1.1.1.1/24', interface='tap0') route2 = model.Route(cidr='1.1.1.1/24', interface='tap1') self.assertNotEqual(route1, route2) def test_hydrate(self): route = model.Route.hydrate( {'gateway': fake_network_cache_model.new_ip( dict(address='192.168.1.1'))}) self.assertIsNone(route['cidr']) self.assertEqual('192.168.1.1', route['gateway']['address']) self.assertIsNone(route['interface']) class IPTests(test.NoDBTestCase): def test_ip_equal(self): ip1 = model.IP(address='127.0.0.1') ip2 = model.IP(address='127.0.0.1') self.assertEqual(ip1, ip2) def test_ip_not_equal(self): ip1 = model.IP(address='127.0.0.1') ip2 = model.IP(address='172.0.0.3') self.assertNotEqual(ip1, ip2) ip1 = model.IP(address='127.0.0.1', type=1) ip2 = model.IP(address='172.0.0.1', type=2) self.assertNotEqual(ip1, ip2) ip1 = model.IP(address='127.0.0.1', version=4) ip2 = model.IP(address='172.0.0.1', version=6) self.assertNotEqual(ip1, ip2) class FixedIPTests(test.NoDBTestCase): def test_createnew_fixed_ip_with_attrs(self): fixed_ip = model.FixedIP(address='192.168.1.100') self.assertEqual('192.168.1.100', fixed_ip['address']) self.assertEqual([], fixed_ip['floating_ips']) self.assertEqual('fixed', fixed_ip['type']) self.assertEqual(4, fixed_ip['version']) def test_create_fixed_ipv6(self): fixed_ip = model.FixedIP(address='::1') self.assertEqual('::1', fixed_ip['address']) self.assertEqual([], fixed_ip['floating_ips']) self.assertEqual('fixed', fixed_ip['type']) self.assertEqual(6, fixed_ip['version']) def test_create_fixed_bad_ip_fails(self): self.assertRaises(exception.InvalidIpAddressError, model.FixedIP, address='picklespicklespickles') def test_equate_two_fixed_ips(self): fixed_ip = model.FixedIP(address='::1') fixed_ip2 = model.FixedIP(address='::1') self.assertEqual(fixed_ip, fixed_ip2) def test_equate_two_dissimilar_fixed_ips_fails(self): fixed_ip = model.FixedIP(address='::1') fixed_ip2 = model.FixedIP(address='::2') self.assertNotEqual(fixed_ip, fixed_ip2) fixed_ip = model.FixedIP(address='::1', type='1') fixed_ip2 = model.FixedIP(address='::1', type='2') self.assertNotEqual(fixed_ip, fixed_ip2) fixed_ip = model.FixedIP(address='::1', version='6') fixed_ip2 = model.FixedIP(address='::1', version='4') self.assertNotEqual(fixed_ip, fixed_ip2) fixed_ip = model.FixedIP(address='::1', floating_ips='1.1.1.1') fixed_ip2 = model.FixedIP(address='::1', floating_ips='8.8.8.8') self.assertNotEqual(fixed_ip, fixed_ip2) def test_hydrate(self): fixed_ip = model.FixedIP.hydrate({}) self.assertEqual([], fixed_ip['floating_ips']) self.assertIsNone(fixed_ip['address']) 
self.assertEqual('fixed', fixed_ip['type']) self.assertIsNone(fixed_ip['version']) def test_add_floating_ip(self): fixed_ip = model.FixedIP(address='192.168.1.100') fixed_ip.add_floating_ip('192.168.1.101') self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips']) def test_add_floating_ip_repeatedly_only_one_instance(self): fixed_ip = model.FixedIP(address='192.168.1.100') for i in range(10): fixed_ip.add_floating_ip('192.168.1.101') self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips']) class SubnetTests(test.NoDBTestCase): def test_create_subnet_with_attrs(self): subnet = fake_network_cache_model.new_subnet() route1 = fake_network_cache_model.new_route() self.assertEqual('10.10.0.0/24', subnet['cidr']) self.assertEqual( [fake_network_cache_model.new_ip(dict(address='1.2.3.4')), fake_network_cache_model.new_ip(dict(address='2.3.4.5'))], subnet['dns']) self.assertEqual('10.10.0.1', subnet['gateway']['address']) self.assertEqual( [fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.3'))], subnet['ips']) self.assertEqual([route1], subnet['routes']) self.assertEqual(4, subnet['version']) def test_subnet_equal(self): subnet1 = fake_network_cache_model.new_subnet() subnet2 = fake_network_cache_model.new_subnet() self.assertEqual(subnet1, subnet2) def test_subnet_not_equal(self): subnet1 = model.Subnet(cidr='1.1.1.0/24') subnet2 = model.Subnet(cidr='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(dns='1.1.1.0/24') subnet2 = model.Subnet(dns='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(gateway='1.1.1.1/24') subnet2 = model.Subnet(gateway='2.2.2.1/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(ips='1.1.1.0/24') subnet2 = model.Subnet(ips='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(routes='1.1.1.0/24') subnet2 = model.Subnet(routes='2.2.2.0/24') self.assertNotEqual(subnet1, subnet2) subnet1 = model.Subnet(version='4') subnet2 = model.Subnet(version='6') self.assertNotEqual(subnet1, subnet2) def test_add_route(self): subnet = fake_network_cache_model.new_subnet() route1 = fake_network_cache_model.new_route() route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'}) subnet.add_route(route2) self.assertEqual([route1, route2], subnet['routes']) def test_add_route_a_lot(self): subnet = fake_network_cache_model.new_subnet() route1 = fake_network_cache_model.new_route() route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'}) for i in range(10): subnet.add_route(route2) self.assertEqual([route1, route2], subnet['routes']) def test_add_dns(self): subnet = fake_network_cache_model.new_subnet() dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9')) subnet.add_dns(dns) self.assertEqual( [fake_network_cache_model.new_ip(dict(address='1.2.3.4')), fake_network_cache_model.new_ip(dict(address='2.3.4.5')), fake_network_cache_model.new_ip(dict(address='9.9.9.9'))], subnet['dns']) def test_add_dns_a_lot(self): subnet = fake_network_cache_model.new_subnet() for i in range(10): subnet.add_dns(fake_network_cache_model.new_ip( dict(address='9.9.9.9'))) self.assertEqual( [fake_network_cache_model.new_ip(dict(address='1.2.3.4')), fake_network_cache_model.new_ip(dict(address='2.3.4.5')), fake_network_cache_model.new_ip(dict(address='9.9.9.9'))], subnet['dns']) def test_add_ip(self): subnet = fake_network_cache_model.new_subnet() subnet.add_ip(fake_network_cache_model.new_ip( 
dict(address='192.168.1.102'))) self.assertEqual( [fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.3')), fake_network_cache_model.new_ip( dict(address='192.168.1.102'))], subnet['ips']) def test_add_ip_a_lot(self): subnet = fake_network_cache_model.new_subnet() for i in range(10): subnet.add_ip(fake_network_cache_model.new_fixed_ip( dict(address='192.168.1.102'))) self.assertEqual( [fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip( dict(address='10.10.0.3')), fake_network_cache_model.new_fixed_ip( dict(address='192.168.1.102'))], subnet['ips']) def test_hydrate(self): subnet_dict = { 'cidr': '255.255.255.0', 'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))], 'ips': [fake_network_cache_model.new_fixed_ip( dict(address='2.2.2.2'))], 'routes': [fake_network_cache_model.new_route()], 'version': 4, 'gateway': fake_network_cache_model.new_ip( dict(address='3.3.3.3'))} subnet = model.Subnet.hydrate(subnet_dict) self.assertEqual('255.255.255.0', subnet['cidr']) self.assertEqual([fake_network_cache_model.new_ip( dict(address='1.1.1.1'))], subnet['dns']) self.assertEqual('3.3.3.3', subnet['gateway']['address']) self.assertEqual([fake_network_cache_model.new_fixed_ip( dict(address='2.2.2.2'))], subnet['ips']) self.assertEqual([fake_network_cache_model.new_route()], subnet['routes']) self.assertEqual(4, subnet['version']) class NetworkTests(test.NoDBTestCase): def test_create_network(self): network = fake_network_cache_model.new_network() self.assertEqual(1, network['id']) self.assertEqual('br0', network['bridge']) self.assertEqual('public', network['label']) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255'))], network['subnets']) def test_add_subnet(self): network = fake_network_cache_model.new_network() network.add_subnet(fake_network_cache_model.new_subnet( dict(cidr='0.0.0.0'))) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255')), fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))], network['subnets']) def test_add_subnet_a_lot(self): network = fake_network_cache_model.new_network() for i in range(10): network.add_subnet(fake_network_cache_model.new_subnet( dict(cidr='0.0.0.0'))) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255')), fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))], network['subnets']) def test_network_equal(self): network1 = model.Network() network2 = model.Network() self.assertEqual(network1, network2) def test_network_not_equal(self): network1 = model.Network(id='1') network2 = model.Network(id='2') self.assertNotEqual(network1, network2) network1 = model.Network(bridge='br-int') network2 = model.Network(bridge='br0') self.assertNotEqual(network1, network2) network1 = model.Network(label='net1') network2 = model.Network(label='net2') self.assertNotEqual(network1, network2) network1 = model.Network(subnets='1.1.1.0/24') network2 = model.Network(subnets='2.2.2.0/24') self.assertNotEqual(network1, network2) def test_hydrate(self): fake_network_cache_model.new_subnet() fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255')) network = model.Network.hydrate(fake_network_cache_model.new_network()) self.assertEqual(1, network['id']) self.assertEqual('br0', network['bridge']) 
self.assertEqual('public', network['label']) self.assertEqual( [fake_network_cache_model.new_subnet(), fake_network_cache_model.new_subnet( dict(cidr='255.255.255.255'))], network['subnets']) class VIFTests(test.NoDBTestCase): def test_create_vif(self): vif = fake_network_cache_model.new_vif() self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) self.assertEqual(fake_network_cache_model.new_network(), vif['network']) def test_vif_equal(self): vif1 = model.VIF() vif2 = model.VIF() self.assertEqual(vif1, vif2) def test_vif_not_equal(self): vif1 = model.VIF(id=1) vif2 = model.VIF(id=2) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(address='00:00:00:00:00:11') vif2 = model.VIF(address='00:00:00:00:00:22') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(network='net1') vif2 = model.VIF(network='net2') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(type='ovs') vif2 = model.VIF(type='linuxbridge') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(devname='ovs1234') vif2 = model.VIF(devname='linuxbridge1234') self.assertNotEqual(vif1, vif2) vif1 = model.VIF(qbh_params=1) vif2 = model.VIF(qbh_params=None) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(qbg_params=1) vif2 = model.VIF(qbg_params=None) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(active=True) vif2 = model.VIF(active=False) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL) vif2 = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(profile={'pci_slot': '0000:0a:00.1'}) vif2 = model.VIF(profile={'pci_slot': '0000:0a:00.2'}) self.assertNotEqual(vif1, vif2) vif1 = model.VIF(preserve_on_delete=True) vif2 = model.VIF(preserve_on_delete=False) self.assertNotEqual(vif1, vif2) def test_create_vif_with_type(self): vif_dict = dict( id=1, address='aa:aa:aa:aa:aa:aa', network=fake_network_cache_model.new_network(), type='bridge') vif = fake_network_cache_model.new_vif(vif_dict) self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) self.assertEqual('bridge', vif['type']) self.assertEqual(fake_network_cache_model.new_network(), vif['network']) def test_vif_get_fixed_ips(self): vif = fake_network_cache_model.new_vif() fixed_ips = vif.fixed_ips() ips = [ fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.2')), fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.3')) ] * 2 self.assertEqual(fixed_ips, ips) def test_vif_get_floating_ips(self): vif = fake_network_cache_model.new_vif() vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1') floating_ips = vif.floating_ips() self.assertEqual(['192.168.1.1'], floating_ips) def test_vif_get_labeled_ips(self): vif = fake_network_cache_model.new_vif() labeled_ips = vif.labeled_ips() ip_dict = { 'network_id': 1, 'ips': [fake_network_cache_model.new_ip( {'address': '10.10.0.2', 'type': 'fixed'}), fake_network_cache_model.new_ip( {'address': '10.10.0.3', 'type': 'fixed'})] * 2, 'network_label': 'public'} self.assertEqual(ip_dict, labeled_ips) def test_hydrate(self): fake_network_cache_model.new_network() vif = model.VIF.hydrate(fake_network_cache_model.new_vif()) self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) self.assertEqual(fake_network_cache_model.new_network(), vif['network']) def test_hydrate_vif_with_type(self): vif_dict = dict( id=1, address='aa:aa:aa:aa:aa:aa', network=fake_network_cache_model.new_network(), type='bridge') vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict)) 
self.assertEqual(1, vif['id']) self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address']) self.assertEqual('bridge', vif['type']) self.assertEqual(fake_network_cache_model.new_network(), vif['network']) class NetworkInfoTests(test.NoDBTestCase): def test_create_model(self): ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(), fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) self.assertEqual( [fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.2'}), fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips()) def test_create_async_model(self): def async_wrapper(): return model.NetworkInfo( [fake_network_cache_model.new_vif(), fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertEqual( [fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.2'}), fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips()) def test_create_async_model_exceptions(self): def async_wrapper(): raise test.TestingException() ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertRaises(test.TestingException, ninfo.wait) # 2nd one doesn't raise self.assertIsNone(ninfo.wait()) # Test that do_raise=False works on .wait() ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertIsNone(ninfo.wait(do_raise=False)) # Test we also raise calling a method ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) self.assertRaises(test.TestingException, ninfo.fixed_ips) def test_get_floating_ips(self): vif = fake_network_cache_model.new_vif() vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1') ninfo = model.NetworkInfo([vif, fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) self.assertEqual(['192.168.1.1'], ninfo.floating_ips()) def test_hydrate(self): ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(), fake_network_cache_model.new_vif( {'address': 'bb:bb:bb:bb:bb:bb'})]) model.NetworkInfo.hydrate(ninfo) self.assertEqual( [fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.2'}), fake_network_cache_model.new_fixed_ip( {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips()) def _setup_injected_network_scenario(self, should_inject=True, use_ipv4=True, use_ipv6=False, gateway=True, dns=True, two_interfaces=False, libvirt_virt_type=None): """Check that netutils properly decides whether to inject based on whether the supplied subnet is static or dynamic. 
""" network = fake_network_cache_model.new_network({'subnets': []}) subnet_dict = {} if not gateway: subnet_dict['gateway'] = None if not dns: subnet_dict['dns'] = None if not should_inject: subnet_dict['dhcp_server'] = '10.10.0.1' if use_ipv4: network.add_subnet( fake_network_cache_model.new_subnet(subnet_dict)) if should_inject and use_ipv6: gateway_ip = fake_network_cache_model.new_ip(dict( address='1234:567::1')) ip = fake_network_cache_model.new_ip(dict( address='1234:567::2')) ipv6_subnet_dict = dict( cidr='1234:567::/48', gateway=gateway_ip, dns=[fake_network_cache_model.new_ip( dict(address='2001:4860:4860::8888')), fake_network_cache_model.new_ip( dict(address='2001:4860:4860::8844'))], ips=[ip]) if not gateway: ipv6_subnet_dict['gateway'] = None network.add_subnet(fake_network_cache_model.new_subnet( ipv6_subnet_dict)) # Behave as though CONF.flat_injected is True network['meta']['injected'] = True vif = fake_network_cache_model.new_vif({'network': network}) vifs = [vif] if two_interfaces: vifs.append(vif) nwinfo = model.NetworkInfo(vifs) return netutils.get_injected_network_template( nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type) def test_injection_dynamic(self): expected = None template = self._setup_injected_network_scenario(should_inject=False) self.assertEqual(expected, template) def test_injection_static(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 """ template = self._setup_injected_network_scenario() self.assertEqual(expected, template) def test_injection_static_no_gateway(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 """ template = self._setup_injected_network_scenario(gateway=False) self.assertEqual(expected, template) def test_injection_static_no_dns(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 """ template = self._setup_injected_network_scenario(dns=False) self.assertEqual(expected, template) def test_injection_static_overriden_template(self): cfg.CONF.set_override( 'injected_network_template', 'nova/tests/unit/network/interfaces-override.template') expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip route add 0.0.0.0/24 via 192.168.1.1 dev eth0 pre-down ip route del 0.0.0.0/24 via 192.168.1.1 dev eth0 """ template = self._setup_injected_network_scenario() self.assertEqual(expected, template) def test_injection_static_ipv6(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 iface eth0 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 gateway 1234:567::1 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 """ template = self._setup_injected_network_scenario(use_ipv6=True) self.assertEqual(expected, template) def test_injection_static_ipv6_no_gateway(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 iface eth0 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 """ template = self._setup_injected_network_scenario(use_ipv6=True, gateway=False) self.assertEqual(expected, template) def test_injection_static_with_ipv4_off(self): expected = None template = self._setup_injected_network_scenario(use_ipv4=False) self.assertEqual(expected, template) def test_injection_ipv6_two_interfaces(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 iface eth0 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 gateway 1234:567::1 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 auto eth1 iface eth1 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 iface eth1 inet6 static hwaddress ether aa:aa:aa:aa:aa:aa address 1234:567::2 netmask 48 gateway 1234:567::1 dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844 """ template = self._setup_injected_network_scenario(use_ipv6=True, two_interfaces=True) self.assertEqual(expected, template) def test_injection_ipv6_with_lxc(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} post-up ip -6 route add default via 1234:567::1 dev ${IFACE} auto eth1 iface eth1 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 gateway 10.10.0.1 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} post-up ip -6 route add default via 1234:567::1 dev ${IFACE} """ template = self._setup_injected_network_scenario( use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc') self.assertEqual(expected, template) def test_injection_ipv6_with_lxc_no_gateway(self): expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} auto eth1 iface eth1 inet static hwaddress ether aa:aa:aa:aa:aa:aa address 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255 dns-nameservers 1.2.3.4 2.3.4.5 post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} """ template = self._setup_injected_network_scenario( use_ipv6=True, gateway=False, two_interfaces=True, libvirt_virt_type='lxc') self.assertEqual(expected, template) class TestNetworkMetadata(test.NoDBTestCase): def setUp(self): super(TestNetworkMetadata, self).setUp() self.netinfo = model.NetworkInfo([fake_network_cache_model.new_vif( {'type': 'ethernet'})]) # Give this vif ipv4 and ipv6 dhcp subnets ipv4_subnet = fake_network_cache_model.new_subnet(version=4) ipv6_subnet = fake_network_cache_model.new_subnet(version=6) self.netinfo[0]['network']['subnets'][0] = ipv4_subnet self.netinfo[0]['network']['subnets'][1] = ipv6_subnet self.netinfo[0]['network']['meta']['mtu'] = 1500 def test_get_network_metadata_json(self): net_metadata = netutils.get_network_metadata(self.netinfo, use_ipv6=True) # Physical Ethernet self.assertEqual( { 'id': 'interface0', 'type': 'phy', 'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa', 'vif_id': 1, 'mtu': 1500 }, net_metadata['links'][0]) # IPv4 Network self.assertEqual( { 'id': 'network0', 'link': 'interface0', 'type': 'ipv4', 'ip_address': '10.10.0.2', 'netmask': '255.255.255.0', 'routes': [ { 'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '10.10.0.1' }, { 'network': '0.0.0.0', 'netmask': '255.255.255.0', 'gateway': '192.168.1.1' } ], 'network_id': 1 }, net_metadata['networks'][0]) self.assertEqual( { 'id': 'network1', 'link': 'interface0', 'type': 'ipv6', 'ip_address': 'fd00::2', 'netmask': 'ffff:ffff:ffff::', 'routes': [ { 'network': '::', 'netmask': '::', 'gateway': 'fd00::1' }, { 'network': '::', 'netmask': 'ffff:ffff:ffff::', 'gateway': 'fd00::1:1' } ], 'network_id': 1 }, net_metadata['networks'][1]) def test_get_network_metadata_json_dhcp(self): ipv4_subnet = fake_network_cache_model.new_subnet( subnet_dict=dict(dhcp_server='1.1.1.1'), version=4) ipv6_subnet = fake_network_cache_model.new_subnet( subnet_dict=dict(dhcp_server='1234:567::'), version=6) self.netinfo[0]['network']['subnets'][0] = ipv4_subnet self.netinfo[0]['network']['subnets'][1] = 
ipv6_subnet net_metadata = netutils.get_network_metadata(self.netinfo, use_ipv6=True) # IPv4 Network self.assertEqual( { 'id': 'network0', 'link': 'interface0', 'type': 'ipv4_dhcp', 'network_id': 1 }, net_metadata['networks'][0]) # IPv6 Network self.assertEqual( { 'id': 'network1', 'link': 'interface0', 'type': 'ipv6_dhcp', 'network_id': 1 }, net_metadata['networks'][1]) def test__get_nets(self): expected_net = { 'id': 'network0', 'ip_address': '10.10.0.2', 'link': 1, 'netmask': '255.255.255.0', 'network_id': 1, 'routes': [ { 'gateway': '10.10.0.1', 'netmask': '0.0.0.0', 'network': '0.0.0.0'}, { 'gateway': '192.168.1.1', 'netmask': '255.255.255.0', 'network': '0.0.0.0'}], 'type': 'ipv4' } net = netutils._get_nets( self.netinfo[0], self.netinfo[0]['network']['subnets'][0], 4, 0, 1) self.assertEqual(expected_net, net) def test__get_eth_link(self): expected_link = { 'id': 'interface0', 'vif_id': 1, 'type': 'vif', 'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa', 'mtu': 1500 } self.netinfo[0]['type'] = 'vif' link = netutils._get_eth_link(self.netinfo[0], 0) self.assertEqual(expected_link, link) def test__get_eth_link_physical(self): expected_link = { 'id': 'interface1', 'vif_id': 1, 'type': 'phy', 'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa', 'mtu': 1500 } link = netutils._get_eth_link(self.netinfo[0], 1) self.assertEqual(expected_link, link) def test__get_default_route(self): v4_expected = [{ 'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '10.10.0.1', }] v6_expected = [{ 'network': '::', 'netmask': '::', 'gateway': 'fd00::1' }] v4 = netutils._get_default_route( 4, self.netinfo[0]['network']['subnets'][0]) self.assertEqual(v4_expected, v4) v6 = netutils._get_default_route( 6, self.netinfo[0]['network']['subnets'][1]) self.assertEqual(v6_expected, v6) # Test for no gateway self.netinfo[0]['network']['subnets'][0]['gateway'] = None no_route = netutils._get_default_route( 4, self.netinfo[0]['network']['subnets'][0]) self.assertEqual([], no_route) def test__get_dns_services(self): expected_dns = [ {'type': 'dns', 'address': '1.2.3.4'}, {'type': 'dns', 'address': '2.3.4.5'}, {'type': 'dns', 'address': '3.4.5.6'} ] subnet = fake_network_cache_model.new_subnet(version=4) subnet['dns'].append(fake_network_cache_model.new_ip( {'address': '3.4.5.6'})) dns = netutils._get_dns_services(subnet) self.assertEqual(expected_dns, dns) def test_get_network_metadata(self): expected_json = { "links": [ { "ethernet_mac_address": "aa:aa:aa:aa:aa:aa", "id": "interface0", "type": "phy", "vif_id": 1, "mtu": 1500 }, { "ethernet_mac_address": "aa:aa:aa:aa:aa:ab", "id": "interface1", "type": "phy", "vif_id": 1, "mtu": 1500 }, ], "networks": [ { "id": "network0", "ip_address": "10.10.0.2", "link": "interface0", "netmask": "255.255.255.0", "network_id": "00000000-0000-0000-0000-000000000000", "routes": [ { "gateway": "10.10.0.1", "netmask": "0.0.0.0", "network": "0.0.0.0" }, { "gateway": "192.168.1.1", "netmask": "255.255.255.0", "network": "0.0.0.0" } ], "type": "ipv4" }, { 'id': 'network1', 'ip_address': 'fd00::2', 'link': 'interface0', 'netmask': 'ffff:ffff:ffff::', 'network_id': '00000000-0000-0000-0000-000000000000', 'routes': [{'gateway': 'fd00::1', 'netmask': '::', 'network': '::'}, {'gateway': 'fd00::1:1', 'netmask': 'ffff:ffff:ffff::', 'network': '::'}], 'type': 'ipv6' }, { "id": "network2", "ip_address": "192.168.0.2", "link": "interface1", "netmask": "255.255.255.0", "network_id": "11111111-1111-1111-1111-111111111111", "routes": [ { "gateway": "192.168.0.1", "netmask": "0.0.0.0", "network": "0.0.0.0" } ], 
"type": "ipv4" } ], 'services': [ {'address': '1.2.3.4', 'type': 'dns'}, {'address': '2.3.4.5', 'type': 'dns'}, {'address': '1:2:3:4::', 'type': 'dns'}, {'address': '2:3:4:5::', 'type': 'dns'} ] } self.netinfo[0]['network']['id'] = ( '00000000-0000-0000-0000-000000000000') # Add a second NIC self.netinfo.append(fake_network_cache_model.new_vif({ 'type': 'ethernet', 'address': 'aa:aa:aa:aa:aa:ab'})) address = fake_network_cache_model.new_ip({'address': '192.168.0.2'}) gateway_address = fake_network_cache_model.new_ip( {'address': '192.168.0.1'}) ipv4_subnet = fake_network_cache_model.new_subnet( {'cidr': '192.168.0.0/24', 'gateway': gateway_address, 'ips': [address], 'routes': []}) self.netinfo[1]['network']['id'] = ( '11111111-1111-1111-1111-111111111111') self.netinfo[1]['network']['subnets'][0] = ipv4_subnet self.netinfo[1]['network']['meta']['mtu'] = 1500 network_json = netutils.get_network_metadata(self.netinfo) self.assertEqual(expected_json, network_json) def test_get_network_metadata_no_ipv4(self): expected_json = { "services": [ { "type": "dns", "address": "1:2:3:4::" }, { "type": "dns", "address": "2:3:4:5::" } ], "networks": [ { "network_id": 1, "type": "ipv6", "netmask": "ffff:ffff:ffff::", "link": "interface0", "routes": [ { "netmask": "::", "network": "::", "gateway": "fd00::1" }, { "netmask": "ffff:ffff:ffff::", "network": "::", "gateway": "fd00::1:1" } ], "ip_address": "fd00::2", "id": "network0" } ], "links": [ { "ethernet_mac_address": "aa:aa:aa:aa:aa:aa", "mtu": 1500, "type": "phy", "id": "interface0", "vif_id": 1 } ] } # drop the ipv4 subnet self.netinfo[0]['network']['subnets'].pop(0) network_json = netutils.get_network_metadata(self.netinfo) self.assertEqual(expected_json, network_json) nova-13.1.4/nova/tests/unit/network/test_linux_net.py0000664000567000056710000017253513064447152024122 0ustar jenkinsjenkins00000000000000# Copyright 2011 NTT # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import calendar import datetime import os import re import time import mock from mox3 import mox import netifaces from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import fileutils from oslo_utils import timeutils from nova import context from nova import db from nova import exception from nova.network import driver from nova.network import linux_net from nova.network import model as network_model from nova import objects from nova import test from nova import utils CONF = cfg.CONF CONF.import_opt('share_dhcp_address', 'nova.objects.network') CONF.import_opt('network_device_mtu', 'nova.objects.network') HOST = "testhost" instances = {'00000000-0000-0000-0000-0000000000000000': {'id': 0, 'uuid': '00000000-0000-0000-0000-0000000000000000', 'host': 'fake_instance00', 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0), 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0), 'hostname': 'fake_instance00'}, '00000000-0000-0000-0000-0000000000000001': {'id': 1, 'uuid': '00000000-0000-0000-0000-0000000000000001', 'host': 'fake_instance01', 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0), 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0), 'hostname': 'fake_instance01'}, '00000000-0000-0000-0000-0000000000000002': {'id': 2, 'uuid': '00000000-0000-0000-0000-0000000000000002', 'host': 'fake_instance02', 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0), 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0), 'hostname': 'really_long_fake_instance02_to_test_hostname_' 'truncation_when_too_long'}} addresses = [{"address": "10.0.0.1"}, {"address": "10.0.0.2"}, {"address": "10.0.0.3"}, {"address": "10.0.0.4"}, {"address": "10.0.0.5"}, {"address": "10.0.0.6"}] networks = [{'id': 0, 'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 'label': 'test0', 'injected': False, 'multi_host': False, 'cidr': '192.168.0.0/24', 'cidr_v6': '2001:db8::/64', 'gateway_v6': '2001:db8::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'dhcp_server': '192.168.0.1', 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', 'vpn_public_address': '192.168.0.2', 'mtu': None, 'enable_dhcp': True, 'share_address': False}, {'id': 1, 'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", 'label': 'test1', 'injected': False, 'multi_host': True, 'cidr': '192.168.1.0/24', 'cidr_v6': '2001:db9::/64', 'gateway_v6': '2001:db9::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.1.1', 'broadcast': '192.168.1.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'dhcp_server': '192.168.1.1', 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', 'vpn_public_address': '192.168.1.2', 'mtu': None, 'enable_dhcp': True, 'share_address': False}, {'id': 2, 'uuid': "cccccccc-cccc-cccc-cccc-cccccccccccc", 'label': 'test2', 'injected': False, 'multi_host': True, 'cidr': '192.168.2.0/24', 'cidr_v6': '2001:db10::/64', 'gateway_v6': '2001:db10::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa2', 'bridge_interface': 'fake_fa2', 'gateway': '192.168.2.1', 'broadcast': '192.168.2.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'dhcp_server': '192.168.2.1', 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', 'vpn_public_address': 
'192.168.2.2', 'mtu': None, 'enable_dhcp': True, 'share_address': False}] fixed_ips = [{'id': 0, 'network_id': 0, 'address': '192.168.0.100', 'instance_id': 0, 'allocated': True, 'leased': True, 'virtual_interface_id': 0, 'default_route': True, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, {'id': 1, 'network_id': 1, 'address': '192.168.1.100', 'instance_id': 0, 'allocated': True, 'leased': True, 'virtual_interface_id': 1, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, {'id': 2, 'network_id': 1, 'address': '192.168.0.101', 'instance_id': 1, 'allocated': True, 'leased': True, 'virtual_interface_id': 2, 'default_route': True, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 3, 'network_id': 0, 'address': '192.168.1.101', 'instance_id': 1, 'allocated': True, 'leased': True, 'virtual_interface_id': 3, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 4, 'network_id': 0, 'address': '192.168.0.102', 'instance_id': 0, 'allocated': True, 'leased': False, 'virtual_interface_id': 4, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, {'id': 5, 'network_id': 1, 'address': '192.168.1.102', 'instance_id': 1, 'allocated': True, 'leased': False, 'virtual_interface_id': 5, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 6, 'network_id': 1, 'address': '192.168.1.103', 'instance_id': 1, 'allocated': False, 'leased': True, 'virtual_interface_id': 6, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, {'id': 7, 'network_id': 2, 'address': '192.168.2.100', 'instance_id': 2, 'allocated': True, 'leased': False, 'virtual_interface_id': 7, 'default_route': False, 'instance_uuid': '00000000-0000-0000-0000-0000000000000002', 'floating_ips': []}] vifs = [{'id': 0, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:00', 'uuid': '00000000-0000-0000-0000-0000000000000000', 'network_id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'}, {'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:01', 'uuid': '00000000-0000-0000-0000-0000000000000001', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'}, {'id': 2, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:02', 'uuid': '00000000-0000-0000-0000-0000000000000002', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 3, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:03', 'uuid': '00000000-0000-0000-0000-0000000000000003', 'network_id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 4, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:04', 'uuid': '00000000-0000-0000-0000-0000000000000004', 'network_id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'}, {'id': 5, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:05', 'uuid': '00000000-0000-0000-0000-0000000000000005', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 6, 'created_at': None, 'updated_at': 
None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:06', 'uuid': '00000000-0000-0000-0000-0000000000000006', 'network_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}, {'id': 7, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'address': 'DE:AD:BE:EF:00:07', 'uuid': '00000000-0000-0000-0000-0000000000000007', 'network_id': 2, 'instance_uuid': '00000000-0000-0000-0000-0000000000000002'}] def get_associated(context, network_id, host=None, address=None): result = [] for datum in fixed_ips: if (datum['network_id'] == network_id and datum['instance_uuid'] is not None and datum['virtual_interface_id'] is not None): instance = instances[datum['instance_uuid']] if host and host != instance['host']: continue if address and address != datum['address']: continue cleaned = {} cleaned['address'] = datum['address'] cleaned['instance_uuid'] = datum['instance_uuid'] cleaned['network_id'] = datum['network_id'] cleaned['vif_id'] = datum['virtual_interface_id'] vif = vifs[datum['virtual_interface_id']] cleaned['vif_address'] = vif['address'] cleaned['instance_hostname'] = instance['hostname'] cleaned['instance_updated'] = instance['updated_at'] cleaned['instance_created'] = instance['created_at'] cleaned['allocated'] = datum['allocated'] cleaned['leased'] = datum['leased'] cleaned['default_route'] = datum['default_route'] result.append(cleaned) return result class LinuxNetworkUtilsTestCase(test.NoDBTestCase): def test_is_pid_cmdline_correct(self): # Negative general case fake_open = mock.mock_open(read_data='no-such-process') with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertFalse(linux_net.is_pid_cmdline_correct(1, "foo"), "foo should not be in 'no-such-process'") # Negative case that would be a thing we would want to skip fake_open = mock.mock_open( read_data=('/usr/sbin/dnsmasq ' '--conf-file=/var/run/NetworkManager/dnsmasq.conf')) with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertFalse( linux_net.is_pid_cmdline_correct(1, "nova-br100.conf"), "nova-br100.conf should not have been found") # Positive matching case fake_open = mock.mock_open( read_data=('/usr/sbin/dnsmasq ' '--dhcp-hostsfile=' '/opt/stack/data/nova/networks/nova-br100.conf')) with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertTrue( linux_net.is_pid_cmdline_correct(1, "nova-br100.conf"), 'nova-br100.conf should have been found') # Negative case. This would match except we throw an IOError/OSError # because the file couldn't be read or opened, this should then fail. 
for err in (IOError, OSError): fake_open = mock.mock_open( read_data=('/usr/sbin/dnsmasq ' '--dhcp-hostsfile=' '/opt/stack/data/nova/networks/nova-br100.conf')) fake_open.side_effect = err with mock.patch.object(linux_net, 'open', fake_open, create=True): self.assertFalse( linux_net.is_pid_cmdline_correct(1, "nova-br100.conf"), 'nova-br100.conf should not have been found') class LinuxNetworkTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True def setUp(self): super(LinuxNetworkTestCase, self).setUp() self.driver = driver.load_network_driver() self.driver.db = db self.context = context.RequestContext('testuser', 'testproject', is_admin=True) def get_vifs(_context, instance_uuid, use_slave): return [vif for vif in vifs if vif['instance_uuid'] == instance_uuid] def get_instance(_context, instance_id): return instances[instance_id] self.stub_out('nova.db.virtual_interface_get_by_instance', get_vifs) self.stub_out('nova.db.instance_get', get_instance) self.stub_out('nova.db.network_get_associated_fixed_ips', get_associated) def _test_add_snat_rule(self, expected, is_external): def verify_add_rule(chain, rule): self.assertEqual('snat', chain) self.assertEqual(expected, rule) self.called = True self.stubs.Set(linux_net.iptables_manager.ipv4['nat'], 'add_rule', verify_add_rule) self.called = False linux_net.add_snat_rule('10.0.0.0/24', is_external) if expected: self.assertTrue(self.called) def test_add_snat_rule_no_ext(self): self.flags(routing_source_ip='10.10.10.1') expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 ' '-j SNAT --to-source 10.10.10.1 -o eth0') self._test_add_snat_rule(expected, False) def test_add_snat_rule_ext(self): self.flags(routing_source_ip='10.10.10.1') expected = () self._test_add_snat_rule(expected, True) def test_add_snat_rule_snat_range_no_ext(self): self.flags(routing_source_ip='10.10.10.1', force_snat_range=['10.10.10.0/24']) expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 ' '-j SNAT --to-source 10.10.10.1 -o eth0') self._test_add_snat_rule(expected, False) def test_add_snat_rule_snat_range_ext(self): self.flags(routing_source_ip='10.10.10.1', force_snat_range=['10.10.10.0/24']) expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 ' '-j SNAT --to-source 10.10.10.1') self._test_add_snat_rule(expected, True) def test_update_dhcp_for_nw00(self): self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(self.driver, 'write_to_file') self.mox.StubOutWithMock(fileutils, 'ensure_tree') self.mox.StubOutWithMock(os, 'chmod') self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.driver.update_dhcp(self.context, "eth0", networks[0]) def test_update_dhcp_for_nw01(self): self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(self.driver, 'write_to_file') self.mox.StubOutWithMock(fileutils, 'ensure_tree') self.mox.StubOutWithMock(os, 'chmod') self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) 
fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) fileutils.ensure_tree(mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) os.chmod(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.driver.update_dhcp(self.context, "eth0", networks[0]) def _get_fixedips(self, network, host=None): return objects.FixedIPList.get_by_network(self.context, network, host=host) def test_get_dhcp_hosts_for_nw00(self): self.flags(use_single_default_gateway=True) expected = ( "DE:AD:BE:EF:00:00,fake_instance00.novalocal," "192.168.0.100,net:NW-0\n" "DE:AD:BE:EF:00:03,fake_instance01.novalocal," "192.168.1.101,net:NW-3\n" "DE:AD:BE:EF:00:04,fake_instance00.novalocal," "192.168.0.102,net:NW-4" ) fixedips = self._get_fixedips(networks[0]) actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[0], fixedips) self.assertEqual(expected, actual_hosts) def test_get_dhcp_hosts_for_nw01(self): self.flags(use_single_default_gateway=True) expected = ( "DE:AD:BE:EF:00:02,fake_instance01.novalocal," "192.168.0.101,net:NW-2\n" "DE:AD:BE:EF:00:05,fake_instance01.novalocal," "192.168.1.102,net:NW-5" ) fixedips = self._get_fixedips(networks[1], host='fake_instance01') actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1], fixedips) self.assertEqual(expected, actual_hosts) def test_get_dns_hosts_for_nw00(self): expected = ( "192.168.0.100\tfake_instance00.novalocal\n" "192.168.1.101\tfake_instance01.novalocal\n" "192.168.0.102\tfake_instance00.novalocal" ) actual_hosts = self.driver.get_dns_hosts(self.context, networks[0]) self.assertEqual(expected, actual_hosts) def test_get_dns_hosts_for_nw01(self): expected = ( "192.168.1.100\tfake_instance00.novalocal\n" "192.168.0.101\tfake_instance01.novalocal\n" "192.168.1.102\tfake_instance01.novalocal" ) actual_hosts = self.driver.get_dns_hosts(self.context, networks[1]) self.assertEqual(expected, actual_hosts) def test_get_dhcp_opts_for_nw00(self): self.flags(use_single_default_gateway=True) expected_opts = 'NW-0,3,192.168.0.1\nNW-3,3\nNW-4,3' fixedips = self._get_fixedips(networks[0]) actual_opts = self.driver.get_dhcp_opts(self.context, networks[0], fixedips) self.assertEqual(expected_opts, actual_opts) def test_get_dhcp_opts_for_nw00_no_single_default_gateway(self): self.flags(use_single_default_gateway=False) expected_opts = '3,192.168.0.1' fixedips = self._get_fixedips(networks[0]) actual_opts = self.driver.get_dhcp_opts(self.context, networks[0], fixedips) self.assertEqual(expected_opts, actual_opts) def test_get_dhcp_opts_for_nw01(self): self.flags(use_single_default_gateway=True) expected_opts = "NW-2,3,192.168.1.1\nNW-5,3" fixedips = self._get_fixedips(networks[1], 'fake_instance01') actual_opts = self.driver.get_dhcp_opts(self.context, networks[1], fixedips) self.assertEqual(expected_opts, actual_opts) def test_get_dhcp_leases_for_nw00(self): timestamp = timeutils.utcnow() seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) leases = self.driver.get_dhcp_leases(self.context, networks[0]) leases = leases.split('\n') for lease in leases: lease = lease.split(' ') data = get_associated(self.context, 0, address=lease[2])[0] self.assertTrue(data['allocated']) self.assertTrue(data['leased']) self.assertTrue(int(lease[0]) > seconds_since_epoch) self.assertEqual(data['vif_address'], lease[1]) self.assertEqual(data['address'], lease[2]) self.assertEqual(data['instance_hostname'], lease[3]) self.assertEqual('*', lease[4]) def test_get_dhcp_leases_for_nw01(self): self.flags(host='fake_instance01') timestamp = 
timeutils.utcnow() seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) leases = self.driver.get_dhcp_leases(self.context, networks[1]) leases = leases.split('\n') for lease in leases: lease = lease.split(' ') data = get_associated(self.context, 1, address=lease[2])[0] self.assertTrue(data['leased']) self.assertTrue(int(lease[0]) > seconds_since_epoch) self.assertEqual(data['vif_address'], lease[1]) self.assertEqual(data['address'], lease[2]) self.assertEqual(data['instance_hostname'], lease[3]) self.assertEqual('*', lease[4]) def test_dhcp_opts_not_default_gateway_network(self): expected = "NW-0,3" fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 0})[0] actual = self.driver._host_dhcp_opts(fixedip.virtual_interface_id) self.assertEqual(expected, actual) def test_host_dhcp_without_default_gateway_network(self): expected = ','.join(['DE:AD:BE:EF:00:00', 'fake_instance00.novalocal', '192.168.0.100']) fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 0})[0] actual = self.driver._host_dhcp(fixedip) self.assertEqual(expected, actual) def test_host_dhcp_truncated_hostname(self): expected = ','.join(['DE:AD:BE:EF:00:07', 're-ng_fake_instance02_to_test_hostname_' 'truncation_when_too_long.novalocal', '192.168.2.100']) fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 2})[0] actual = self.driver._host_dhcp(fixedip) self.assertEqual(expected, actual) def test_host_dns_without_default_gateway_network(self): expected = "192.168.0.100\tfake_instance00.novalocal" fixedip = objects.FixedIPList.get_by_network(self.context, {'id': 0})[0] actual = self.driver._host_dns(fixedip) self.assertEqual(expected, actual) def test_linux_bridge_driver_plug(self): """Makes sure plug doesn't drop FORWARD by default. Ensures bug 890195 doesn't reappear. """ def fake_execute(*args, **kwargs): return "", "" self.stubs.Set(utils, 'execute', fake_execute) def verify_add_rule(chain, rule): self.assertEqual('FORWARD', chain) self.assertIn('ACCEPT', rule) self.stubs.Set(linux_net.iptables_manager.ipv4['filter'], 'add_rule', verify_add_rule) driver = linux_net.LinuxBridgeInterfaceDriver() driver.plug({"bridge": "br100", "bridge_interface": "eth0", "share_address": False}, "fakemac") def test_linux_ovs_driver_plug_exception(self): self.flags(fake_network=False) def fake_execute(*args, **kwargs): raise processutils.ProcessExecutionError('specific_error') def fake_device_exists(*args, **kwargs): return False self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(linux_net, 'device_exists', fake_device_exists) driver = linux_net.LinuxOVSInterfaceDriver() exc = self.assertRaises(exception.OvsConfigurationFailure, driver.plug, {'uuid': 'fake_network_uuid'}, 'fake_mac') self.assertRegex( str(exc), re.compile("OVS configuration failed with: .*specific_error.*", re.DOTALL)) self.assertIsInstance(exc.kwargs['inner_exception'], processutils.ProcessExecutionError) def test_vlan_override(self): """Makes sure vlan_interface flag overrides network bridge_interface. 
Allows heterogeneous networks a la bug 833426 """ driver = linux_net.LinuxBridgeInterfaceDriver() info = {} @staticmethod def test_ensure(vlan, bridge, interface, network, mac_address, mtu): info['passed_interface'] = interface self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan_bridge', test_ensure) network = { "bridge": "br100", "bridge_interface": "base_interface", "share_address": False, "vlan": "fake" } self.flags(vlan_interface="") driver.plug(network, "fakemac") self.assertEqual("base_interface", info['passed_interface']) self.flags(vlan_interface="override_interface") driver.plug(network, "fakemac") self.assertEqual("override_interface", info['passed_interface']) driver.plug(network, "fakemac") def test_flat_override(self): """Makes sure flat_interface flag overrides network bridge_interface. Allows heterogeneous networks a la bug 833426 """ driver = linux_net.LinuxBridgeInterfaceDriver() info = {} @staticmethod def test_ensure(bridge, interface, network, gateway): info['passed_interface'] = interface self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'ensure_bridge', test_ensure) network = { "bridge": "br100", "bridge_interface": "base_interface", "share_address": False, } driver.plug(network, "fakemac") self.assertEqual("base_interface", info['passed_interface']) self.flags(flat_interface="override_interface") driver.plug(network, "fakemac") self.assertEqual("override_interface", info['passed_interface']) def _test_dnsmasq_execute(self, extra_expected=None): network_ref = {'id': 'fake', 'label': 'fake', 'gateway': '10.0.0.1', 'multi_host': False, 'cidr': '10.0.0.0/24', 'netmask': '255.255.255.0', 'dns1': '8.8.4.4', 'dhcp_start': '1.0.0.2', 'dhcp_server': '10.0.0.1', 'share_address': False} def fake_execute(*args, **kwargs): executes.append(args) return "", "" def fake_add_dhcp_mangle_rule(*args, **kwargs): executes.append(args) self.stubs.Set(linux_net, '_execute', fake_execute) self.stubs.Set(linux_net, '_add_dhcp_mangle_rule', fake_add_dhcp_mangle_rule) self.stub_out('os.chmod', lambda *a, **kw: None) self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None) self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None) dev = 'br100' default_domain = CONF.dhcp_domain for domain in ('', default_domain): executes = [] self.flags(dhcp_domain=domain) fixedips = self._get_fixedips(network_ref) linux_net.restart_dhcp(self.context, dev, network_ref, fixedips) expected = ['env', 'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile), 'NETWORK_ID=fake', 'dnsmasq', '--strict-order', '--bind-interfaces', '--conf-file=%s' % CONF.dnsmasq_config_file, '--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'), '--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'), '--listen-address=%s' % network_ref['dhcp_server'], '--except-interface=lo', "--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'], network_ref['dhcp_start'], network_ref['netmask'], CONF.dhcp_lease_time), '--dhcp-lease-max=256', '--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'), '--dhcp-script=%s' % CONF.dhcpbridge, '--no-hosts', '--leasefile-ro'] if CONF.dhcp_domain: expected.append('--domain=%s' % CONF.dhcp_domain) if extra_expected: expected += extra_expected self.assertEqual([(dev,), tuple(expected)], executes) def test_dnsmasq_execute(self): self._test_dnsmasq_execute() def test_dnsmasq_execute_dns_servers(self): self.flags(dns_server=['1.1.1.1', '2.2.2.2']) expected = [ '--no-resolv', '--server=1.1.1.1', '--server=2.2.2.2', ] self._test_dnsmasq_execute(expected) def 
test_dnsmasq_execute_use_network_dns_servers(self): self.flags(use_network_dns_servers=True) expected = [ '--no-resolv', '--server=8.8.4.4', ] self._test_dnsmasq_execute(expected) def test_isolated_host(self): self.flags(fake_network=False, share_dhcp_address=True) # NOTE(vish): use a fresh copy of the manager for each test self.stubs.Set(linux_net, 'iptables_manager', linux_net.IptablesManager()) self.stubs.Set(linux_net, 'binary_name', 'test') executes = [] def fake_execute(*args, **kwargs): executes.append(args) return "", "" self.stubs.Set(utils, 'execute', fake_execute) driver = linux_net.LinuxBridgeInterfaceDriver() @staticmethod def fake_ensure(bridge, interface, network, gateway): return bridge self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'ensure_bridge', fake_ensure) iface = 'eth0' dhcp = '192.168.1.1' network = {'dhcp_server': dhcp, 'share_address': False, 'bridge': 'br100', 'bridge_interface': iface} driver.plug(network, 'fakemac') expected = [ ('ebtables', '--concurrent', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i', iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i', iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-i', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-o', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('iptables-save', '-c'), ('iptables-restore', '-c'), ('ip6tables-save', '-c'), ('ip6tables-restore', '-c'), ] self.assertEqual(expected, executes) executes = [] @staticmethod def fake_remove(bridge, gateway): return self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver, 'remove_bridge', fake_remove) driver.unplug(network) expected = [ ('ebtables', '--concurrent', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i', iface, '--arp-ip-dst', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ('ebtables', '--concurrent', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o', iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68', '-j', 'DROP'), ] self.assertEqual(expected, executes) def _test_initialize_gateway(self, existing, expected, routes=''): self.flags(fake_network=False) executes = [] def fake_execute(*args, **kwargs): executes.append(args) if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show': return existing, "" if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show': return routes, "" if args[0] == 'sysctl': return '1\n', '' self.stubs.Set(utils, 'execute', fake_execute) network = {'dhcp_server': '192.168.1.1', 'cidr': 
'192.168.1.0/24', 'broadcast': '192.168.1.255', 'cidr_v6': '2001:db8::/64'} self.driver.initialize_gateway_device('eth0', network) self.assertEqual(expected, executes) def test_initialize_gateway_moves_wrong_ip(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'addr', 'del', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', '192.168.1.255', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_initialize_gateway_ip_with_dynamic_flag(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.0.1/24 brd 192.168.0.255 scope global " "dynamic eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'addr', 'del', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', '192.168.1.255', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_initialize_gateway_resets_route(self): routes = ("default via 192.168.0.1 dev eth0\n" "192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n") existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'route', 'del', 'default', 'dev', 'eth0'), ('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'), ('ip', 'addr', 'del', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', '192.168.1.255', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.0.1/24', 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'), ('ip', 'route', 'add', 'default', 'via', '192.168.0.1', 'dev', 'eth0'), ('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254', 'dev', 'eth0', 'proto', 'static'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected, routes) def test_initialize_gateway_no_move_right_ip(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n" " inet 192.168.0.1/24 brd 
192.168.0.255 scope global eth0\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_initialize_gateway_add_if_blank(self): existing = ("2: eth0: " " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n" " inet6 dead::beef:dead:beef:dead/64 scope link\n" " valid_lft forever preferred_lft forever\n") expected = [ ('sysctl', '-n', 'net.ipv4.ip_forward'), ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ('ip', 'route', 'show', 'dev', 'eth0'), ('ip', 'addr', 'add', '192.168.1.1/24', 'brd', '192.168.1.255', 'dev', 'eth0'), ('ip', '-f', 'inet6', 'addr', 'change', '2001:db8::/64', 'dev', 'eth0'), ] self._test_initialize_gateway(existing, expected) def test_ensure_floating_no_duplicate_forwards(self): ln = linux_net self.stubs.Set(ln.iptables_manager, 'apply', lambda: None) self.stubs.Set(ln, 'ensure_ebtables_rules', lambda *a, **kw: None) net = {'bridge': 'br100', 'cidr': '10.0.0.0/24'} ln.ensure_floating_forward('10.10.10.10', '10.0.0.1', 'eth0', net) ln.ensure_floating_forward('10.10.10.11', '10.0.0.10', 'eth0', net) two_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules) ln.ensure_floating_forward('10.10.10.10', '10.0.0.3', 'eth0', net) dup_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules) self.assertEqual(two_forward_rules, dup_forward_rules) def test_apply_ran(self): manager = linux_net.IptablesManager() manager.iptables_apply_deferred = False self.mox.StubOutWithMock(manager, '_apply') manager._apply() self.mox.ReplayAll() empty_ret = manager.apply() self.assertIsNone(empty_ret) def test_apply_not_run(self): manager = linux_net.IptablesManager() manager.iptables_apply_deferred = True self.mox.StubOutWithMock(manager, '_apply') self.mox.ReplayAll() manager.apply() def test_deferred_unset_apply_ran(self): manager = linux_net.IptablesManager() manager.iptables_apply_deferred = True self.mox.StubOutWithMock(manager, '_apply') manager._apply() self.mox.ReplayAll() manager.defer_apply_off() self.assertFalse(manager.iptables_apply_deferred) def _test_add_metadata_accept_rule(self, expected): def verify_add_rule(chain, rule): self.assertEqual('INPUT', chain) self.assertEqual(expected, rule) self.stubs.Set(linux_net.iptables_manager.ipv4['filter'], 'add_rule', verify_add_rule) linux_net.metadata_accept() def _test_add_metadata_accept_ipv6_rule(self, expected): def verify_add_rule(chain, rule): self.assertEqual('INPUT', chain) self.assertEqual(expected, rule) self.stubs.Set(linux_net.iptables_manager.ipv6['filter'], 'add_rule', verify_add_rule) linux_net.metadata_accept() def test_metadata_accept(self): self.flags(metadata_port='8775') self.flags(metadata_host='10.10.10.1') expected = ('-p tcp -m tcp --dport 8775 ' '-d 10.10.10.1 -j ACCEPT') self._test_add_metadata_accept_rule(expected) def test_metadata_accept_ipv6(self): self.flags(metadata_port='8775') self.flags(metadata_host='2600::') expected = ('-p tcp -m tcp --dport 8775 ' '-d 2600:: -j ACCEPT') self._test_add_metadata_accept_ipv6_rule(expected) def test_metadata_accept_localhost(self): self.flags(metadata_port='8775') self.flags(metadata_host='127.0.0.1') expected = ('-p tcp -m tcp --dport 8775 ' '-m addrtype --dst-type LOCAL -j ACCEPT') 
self._test_add_metadata_accept_rule(expected) def test_metadata_accept_ipv6_localhost(self): self.flags(metadata_port='8775') self.flags(metadata_host='::1') expected = ('-p tcp -m tcp --dport 8775 ' '-m addrtype --dst-type LOCAL -j ACCEPT') self._test_add_metadata_accept_ipv6_rule(expected) def _test_add_metadata_forward_rule(self, expected): def verify_add_rule(chain, rule): self.assertEqual('PREROUTING', chain) self.assertEqual(expected, rule) self.stubs.Set(linux_net.iptables_manager.ipv4['nat'], 'add_rule', verify_add_rule) linux_net.metadata_forward() def test_metadata_forward(self): self.flags(metadata_port='8775') self.flags(metadata_host='10.10.10.1') expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp ' '--dport 80 -j DNAT --to-destination 10.10.10.1:8775') self._test_add_metadata_forward_rule(expected) def test_metadata_forward_localhost(self): self.flags(metadata_port='8775') self.flags(metadata_host='127.0.0.1') expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp ' '--dport 80 -j REDIRECT --to-ports 8775') self._test_add_metadata_forward_rule(expected) def test_ensure_bridge_brings_up_interface(self): # We have to bypass the CONF.fake_network check so that netifaces # is actually called. self.flags(fake_network=False) fake_mac = 'aa:bb:cc:00:11:22' fake_ifaces = { netifaces.AF_LINK: [{'addr': fake_mac}] } calls = { 'device_exists': [mock.call('bridge')], '_execute': [ mock.call('brctl', 'addif', 'bridge', 'eth0', run_as_root=True, check_exit_code=False), mock.call('ip', 'link', 'set', 'bridge', 'address', fake_mac, run_as_root=True), mock.call('ip', 'link', 'set', 'eth0', 'up', run_as_root=True, check_exit_code=False), mock.call('ip', 'route', 'show', 'dev', 'eth0'), mock.call('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'), ] } with test.nested( mock.patch.object(linux_net, 'device_exists', return_value=True), mock.patch.object(linux_net, '_execute', return_value=('', '')), mock.patch.object(netifaces, 'ifaddresses') ) as (device_exists, _execute, ifaddresses): ifaddresses.return_value = fake_ifaces driver = linux_net.LinuxBridgeInterfaceDriver() driver.ensure_bridge('bridge', 'eth0') device_exists.assert_has_calls(calls['device_exists']) _execute.assert_has_calls(calls['_execute']) ifaddresses.assert_called_once_with('eth0') def test_ensure_bridge_brclt_addif_exception(self): def fake_execute(*cmd, **kwargs): if ('brctl', 'addif', 'bridge', 'eth0') == cmd: return ('', 'some error happens') else: return ('', '') with test.nested( mock.patch.object(linux_net, 'device_exists', return_value=True), mock.patch.object(linux_net, '_execute', fake_execute) ) as (device_exists, _): driver = linux_net.LinuxBridgeInterfaceDriver() self.assertRaises(exception.NovaException, driver.ensure_bridge, 'bridge', 'eth0') device_exists.assert_called_once_with('bridge') def test_ensure_bridge_brclt_addbr_neutron_race(self): def fake_execute(*cmd, **kwargs): if ('brctl', 'addbr', 'brq1234567-89') == cmd: return ('', "device brq1234567-89 already exists; " "can't create bridge with the same name\n") else: return ('', '') with test.nested( mock.patch.object(linux_net, 'device_exists', return_value=False), mock.patch.object(linux_net, '_execute', fake_execute) ) as (device_exists, _): driver = linux_net.LinuxBridgeInterfaceDriver() driver.ensure_bridge('brq1234567-89', '') device_exists.assert_called_once_with('brq1234567-89') def test_set_device_mtu_configured(self): self.flags(network_device_mtu=10000) calls = [ mock.call('ip', 'link', 'set', 'fake-dev', 'mtu', 10000, 
run_as_root=True, check_exit_code=[0, 2, 254]) ] with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net._set_device_mtu('fake-dev') ex.assert_has_calls(calls) def test_set_device_mtu_default(self): calls = [] with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net._set_device_mtu('fake-dev') ex.assert_has_calls(calls) def _ovs_vif_port(self, calls, interface_type=None): with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net.create_ovs_vif_port('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', 'fake-instance-uuid', interface_type=interface_type) ex.assert_has_calls(calls) def test_ovs_vif_port_cmd(self): expected = ['--', '--if-exists', 'del-port', 'fake-dev', '--', 'add-port', 'fake-bridge', 'fake-dev', '--', 'set', 'Interface', 'fake-dev', 'external-ids:iface-id=fake-iface-id', 'external-ids:iface-status=active', 'external-ids:attached-mac=fake-mac', 'external-ids:vm-uuid=fake-instance-uuid' ] cmd = linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', 'fake-instance-uuid') self.assertEqual(expected, cmd) expected += ['type=fake-type'] cmd = linux_net._create_ovs_vif_cmd('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', 'fake-instance-uuid', 'fake-type') self.assertEqual(expected, cmd) def test_ovs_vif_port(self): calls = [ mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists', 'del-port', 'fake-dev', '--', 'add-port', 'fake-bridge', 'fake-dev', '--', 'set', 'Interface', 'fake-dev', 'external-ids:iface-id=fake-iface-id', 'external-ids:iface-status=active', 'external-ids:attached-mac=fake-mac', 'external-ids:vm-uuid=fake-instance-uuid', run_as_root=True) ] self._ovs_vif_port(calls) @mock.patch.object(linux_net, '_ovs_vsctl') @mock.patch.object(linux_net, '_create_ovs_vif_cmd') @mock.patch.object(linux_net, '_set_device_mtu') def test_ovs_vif_port_with_type_vhostuser(self, mock_set_device_mtu, mock_create_cmd, mock_vsctl): linux_net.create_ovs_vif_port( 'fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', "fake-instance-uuid", mtu=1500, interface_type=network_model.OVS_VHOSTUSER_INTERFACE_TYPE) mock_create_cmd.assert_called_once_with('fake-bridge', 'fake-dev', 'fake-iface-id', 'fake-mac', "fake-instance-uuid", network_model.OVS_VHOSTUSER_INTERFACE_TYPE) self.assertFalse(mock_set_device_mtu.called) self.assertTrue(mock_vsctl.called) def test_ovs_vif_port_with_mtu(self): self.flags(network_device_mtu=10000) calls = [ mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists', 'del-port', 'fake-dev', '--', 'add-port', 'fake-bridge', 'fake-dev', '--', 'set', 'Interface', 'fake-dev', 'external-ids:iface-id=fake-iface-id', 'external-ids:iface-status=active', 'external-ids:attached-mac=fake-mac', 'external-ids:vm-uuid=fake-instance-uuid', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev', 'mtu', 10000, run_as_root=True, check_exit_code=[0, 2, 254]) ] self._ovs_vif_port(calls) def _create_veth_pair(self, calls): with mock.patch.object(utils, 'execute', return_value=('', '')) as ex: linux_net._create_veth_pair('fake-dev1', 'fake-dev2') ex.assert_has_calls(calls) def test_create_veth_pair(self): calls = [ mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth', 'peer', 'name', 'fake-dev2', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev2', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 
'fake-dev2', 'promisc', 'on', run_as_root=True) ] self._create_veth_pair(calls) def test_create_veth_pair_with_mtu(self): self.flags(network_device_mtu=10000) calls = [ mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth', 'peer', 'name', 'fake-dev2', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev1', 'mtu', 10000, run_as_root=True, check_exit_code=[0, 2, 254]), mock.call('ip', 'link', 'set', 'fake-dev2', 'up', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on', run_as_root=True), mock.call('ip', 'link', 'set', 'fake-dev2', 'mtu', 10000, run_as_root=True, check_exit_code=[0, 2, 254]) ] self._create_veth_pair(calls) def test_exec_ebtables_success(self): executes = [] def fake_execute(*args, **kwargs): executes.append(args) return "", "" self.stubs.Set(self.driver, '_execute', fake_execute) self.driver._exec_ebtables('fake') self.assertEqual(1, len(executes)) self.mox.UnsetStubs() def _ebtables_race_stderr(self): return (u"Unable to update the kernel. Two possible causes:\n" "1. Multiple ebtables programs were executing simultaneously." " The ebtables\n userspace tool doesn't by default support " "multiple ebtables programs running\n concurrently. The " "ebtables option --concurrent or a tool like flock can be\n " "used to support concurrent scripts that update the ebtables " "kernel tables.\n2. The kernel doesn't support a certain " "ebtables extension, consider\n recompiling your kernel or " "insmod the extension.\n.\n") def test_exec_ebtables_fail_all(self): executes = [] def fake_sleep(interval): pass def fake_execute(*args, **kwargs): executes.append(args) raise processutils.ProcessExecutionError('error', stderr=self._ebtables_race_stderr()) self.stubs.Set(time, 'sleep', fake_sleep) self.stubs.Set(self.driver, '_execute', fake_execute) self.assertRaises(processutils.ProcessExecutionError, self.driver._exec_ebtables, 'fake') max_calls = CONF.ebtables_exec_attempts self.assertEqual(max_calls, len(executes)) self.mox.UnsetStubs() def test_exec_ebtables_fail_no_retry(self): executes = [] def fake_sleep(interval): pass def fake_execute(*args, **kwargs): executes.append(args) raise processutils.ProcessExecutionError('error', stderr="Sorry, rule does not exist") self.stubs.Set(time, 'sleep', fake_sleep) self.stubs.Set(self.driver, '_execute', fake_execute) self.assertRaises(processutils.ProcessExecutionError, self.driver._exec_ebtables, 'fake') self.assertEqual(1, len(executes)) self.mox.UnsetStubs() def test_exec_ebtables_fail_once(self): executes = [] def fake_sleep(interval): pass def fake_execute(*args, **kwargs): executes.append(args) if len(executes) == 1: raise processutils.ProcessExecutionError('error', stderr=self._ebtables_race_stderr()) else: return "", "" self.stubs.Set(time, 'sleep', fake_sleep) self.stubs.Set(self.driver, '_execute', fake_execute) self.driver._exec_ebtables('fake') self.assertEqual(2, len(executes)) self.mox.UnsetStubs() @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute') def test_remove_bridge(self, mock_execute, mock_exists): linux_net.LinuxBridgeInterfaceDriver.remove_bridge('fake-bridge') expected_exists_args = mock.call('/sys/class/net/fake-bridge') expected_execute_args = [ mock.call('ip', 'link', 'set', 'fake-bridge', 'down', run_as_root=True), mock.call('brctl', 'delbr', 'fake-bridge', run_as_root=True)] 
self.assertIn(expected_exists_args, mock_exists.mock_calls) self.assertEqual(expected_execute_args, mock_execute.mock_calls) @mock.patch.object(linux_net, '_execute') @mock.patch.object(linux_net, 'device_exists', return_value=False) @mock.patch.object(linux_net, '_set_device_mtu') def test_ensure_vlan(self, mock_set_device_mtu, mock_device_exists, mock_execute): interface = linux_net.LinuxBridgeInterfaceDriver.ensure_vlan( 1, 'eth0', 'MAC', 'MTU', "vlan_name") self.assertEqual("vlan_name", interface) mock_device_exists.assert_called_once_with('vlan_name') expected_execute_args = [ mock.call('ip', 'link', 'add', 'link', 'eth0', 'name', 'vlan_name', 'type', 'vlan', 'id', 1, check_exit_code=[0, 2, 254], run_as_root=True), mock.call('ip', 'link', 'set', 'vlan_name', 'address', 'MAC', check_exit_code=[0, 2, 254], run_as_root=True), mock.call('ip', 'link', 'set', 'vlan_name', 'up', check_exit_code=[0, 2, 254], run_as_root=True)] self.assertEqual(expected_execute_args, mock_execute.mock_calls) mock_set_device_mtu.assert_called_once_with('vlan_name', 'MTU') @mock.patch.object(linux_net, '_execute') @mock.patch.object(linux_net, 'device_exists', return_value=True) @mock.patch.object(linux_net, '_set_device_mtu') def test_ensure_vlan_device_exists(self, mock_set_device_mtu, mock_device_exists, mock_execute): interface = linux_net.LinuxBridgeInterfaceDriver.ensure_vlan(1, 'eth0') self.assertEqual("vlan1", interface) mock_device_exists.assert_called_once_with('vlan1') self.assertFalse(mock_execute.called) mock_set_device_mtu.assert_called_once_with('vlan1', None) @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.utils.execute', side_effect=processutils.ProcessExecutionError()) def test_remove_bridge_negative(self, mock_execute, mock_exists): self.assertRaises(processutils.ProcessExecutionError, linux_net.LinuxBridgeInterfaceDriver.remove_bridge, 'fake-bridge') nova-13.1.4/nova/tests/unit/network/test_l3.py0000664000567000056710000000167513064447140022424 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.network import l3 from nova import test class L3DriverTestCase(test.NoDBTestCase): def test_linuxnetl3_driver_signatures(self): self.assertPublicAPISignatures(l3.L3Driver, l3.LinuxNetL3) def test_nulll3_driver_signatures(self): self.assertPublicAPISignatures(l3.L3Driver, l3.NullL3) nova-13.1.4/nova/tests/unit/network/test_rpcapi.py0000664000567000056710000003710513064447152023364 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for nova.network.rpcapi """ import collections import mock from mox3 import mox from oslo_config import cfg from nova import context from nova.network import rpcapi as network_rpcapi from nova.objects import base as objects_base from nova import test from nova.tests.unit import fake_instance from nova.tests.unit import fake_network CONF = cfg.CONF class NetworkRpcAPITestCase(test.NoDBTestCase): def setUp(self): super(NetworkRpcAPITestCase, self).setUp() self.flags(multi_host=True) # Used to specify the default value expected if no real value is passed DefaultArg = collections.namedtuple('DefaultArg', ['value']) def _test_network_api(self, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = network_rpcapi.NetworkAPI() self.assertIsNotNone(rpcapi.client) self.assertEqual(CONF.network_topic, rpcapi.client.target.topic) expected_retval = 'foo' if rpc_method == 'call' else None expected_version = kwargs.pop('version', None) expected_fanout = kwargs.pop('fanout', None) expected_kwargs = kwargs.copy() for k, v in expected_kwargs.items(): if isinstance(v, self.DefaultArg): expected_kwargs[k] = v.value kwargs.pop(k) prepare_kwargs = {} if expected_version: prepare_kwargs['version'] = expected_version if expected_fanout: prepare_kwargs['fanout'] = True if 'source_compute' in expected_kwargs: # Fix up for migrate_instance_* calls. expected_kwargs['source'] = expected_kwargs.pop('source_compute') expected_kwargs['dest'] = expected_kwargs.pop('dest_compute') targeted_methods = [ 'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host', '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns', '_associate_floating_ip', '_disassociate_floating_ip', 'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start', 'migrate_instance_finish', 'allocate_for_instance', 'deallocate_for_instance', ] targeted_by_instance = ['deallocate_for_instance'] if method in targeted_methods and ('host' in expected_kwargs or 'instance' in expected_kwargs): if method in targeted_by_instance: host = expected_kwargs['instance']['host'] else: host = expected_kwargs['host'] if method not in ['allocate_for_instance', 'deallocate_fixed_ip']: expected_kwargs.pop('host') if CONF.multi_host: prepare_kwargs['server'] = host self.mox.StubOutWithMock(rpcapi, 'client') version_check = [ 'deallocate_for_instance', 'deallocate_fixed_ip', 'allocate_for_instance', 'release_fixed_ip', 'set_network_host', 'setup_networks_on_host' ] if method in version_check: rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True) if prepare_kwargs: rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client) rpc_method = getattr(rpcapi.client, rpc_method) rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo') self.mox.ReplayAll() retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) def test_create_networks(self): self._test_network_api('create_networks', rpc_method='call', arg1='arg', arg2='arg') def test_delete_network(self): self._test_network_api('delete_network', rpc_method='call', uuid='fake_uuid', fixed_range='range') def test_allocate_for_instance(self): self._test_network_api('allocate_for_instance', rpc_method='call', instance_id='fake_id', project_id='fake_id', host='fake_host', rxtx_factor='fake_factor', vpn=False, requested_networks={}, macs=[], version='1.13') def test_deallocate_for_instance(self): instance = 
fake_instance.fake_instance_obj(context.get_admin_context()) self._test_network_api('deallocate_for_instance', rpc_method='call', requested_networks=self.DefaultArg(None), instance=instance, version='1.11') def test_deallocate_for_instance_with_expected_networks(self): instance = fake_instance.fake_instance_obj(context.get_admin_context()) self._test_network_api('deallocate_for_instance', rpc_method='call', instance=instance, requested_networks={}, version='1.11') def test_add_fixed_ip_to_instance(self): self._test_network_api('add_fixed_ip_to_instance', rpc_method='call', instance_id='fake_id', rxtx_factor='fake_factor', host='fake_host', network_id='fake_id', version='1.9') def test_remove_fixed_ip_from_instance(self): self._test_network_api('remove_fixed_ip_from_instance', rpc_method='call', instance_id='fake_id', rxtx_factor='fake_factor', host='fake_host', address='fake_address', version='1.9') def test_add_network_to_project(self): self._test_network_api('add_network_to_project', rpc_method='call', project_id='fake_id', network_uuid='fake_uuid') def test_get_instance_nw_info(self): self._test_network_api('get_instance_nw_info', rpc_method='call', instance_id='fake_id', rxtx_factor='fake_factor', host='fake_host', project_id='fake_id', version='1.9') def test_validate_networks(self): self._test_network_api('validate_networks', rpc_method='call', networks={}) def test_get_dns_domains(self): self._test_network_api('get_dns_domains', rpc_method='call') def test_add_dns_entry(self): self._test_network_api('add_dns_entry', rpc_method='call', address='addr', name='name', dns_type='foo', domain='domain') def test_modify_dns_entry(self): self._test_network_api('modify_dns_entry', rpc_method='call', address='addr', name='name', domain='domain') def test_delete_dns_entry(self): self._test_network_api('delete_dns_entry', rpc_method='call', name='name', domain='domain') def test_delete_dns_domain(self): self._test_network_api('delete_dns_domain', rpc_method='call', domain='fake_domain') def test_get_dns_entries_by_address(self): self._test_network_api('get_dns_entries_by_address', rpc_method='call', address='fake_address', domain='fake_domain') def test_get_dns_entries_by_name(self): self._test_network_api('get_dns_entries_by_name', rpc_method='call', name='fake_name', domain='fake_domain') def test_create_private_dns_domain(self): self._test_network_api('create_private_dns_domain', rpc_method='call', domain='fake_domain', av_zone='fake_zone') def test_create_public_dns_domain(self): self._test_network_api('create_public_dns_domain', rpc_method='call', domain='fake_domain', project='fake_project') def test_setup_networks_on_host(self): ctxt = context.RequestContext('fake_user', 'fake_project') instance = fake_instance.fake_instance_obj(ctxt) self._test_network_api('setup_networks_on_host', rpc_method='call', instance_id=instance.id, host='fake_host', teardown=False, instance=instance, version='1.16') def test_setup_networks_on_host_v1_0(self): ctxt = context.RequestContext('fake_user', 'fake_project') instance = fake_instance.fake_instance_obj(ctxt) host = 'fake_host' teardown = True rpcapi = network_rpcapi.NetworkAPI() call_mock = mock.Mock() cctxt_mock = mock.Mock(call=call_mock) with test.nested( mock.patch.object(rpcapi.client, 'can_send_version', return_value=False), mock.patch.object(rpcapi.client, 'prepare', return_value=cctxt_mock) ) as ( can_send_mock, prepare_mock ): rpcapi.setup_networks_on_host(ctxt, instance.id, host, teardown, instance) # assert our mocks were called as expected 
can_send_mock.assert_called_once_with('1.16') prepare_mock.assert_called_once_with(version='1.0') call_mock.assert_called_once_with(ctxt, 'setup_networks_on_host', host=host, teardown=teardown, instance_id=instance.id) def test_lease_fixed_ip(self): self._test_network_api('lease_fixed_ip', rpc_method='cast', host='fake_host', address='fake_addr') def test_release_fixed_ip(self): self._test_network_api('release_fixed_ip', rpc_method='cast', host='fake_host', address='fake_addr', mac='fake_mac', version='1.14') def test_release_fixed_ip_no_mac_support(self): # Tests that the mac kwarg is not passed when we can't send version # 1.14 to the network manager. ctxt = context.RequestContext('fake_user', 'fake_project') address = '192.168.65.158' host = 'fake-host' mac = '00:0c:29:2c:b2:64' rpcapi = network_rpcapi.NetworkAPI() cast_mock = mock.Mock() cctxt_mock = mock.Mock(cast=cast_mock) with test.nested( mock.patch.object(rpcapi.client, 'can_send_version', return_value=False), mock.patch.object(rpcapi.client, 'prepare', return_value=cctxt_mock) ) as ( can_send_mock, prepare_mock ): rpcapi.release_fixed_ip(ctxt, address, host, mac) # assert our mocks were called as expected can_send_mock.assert_called_once_with('1.14') prepare_mock.assert_called_once_with(server=host, version='1.0') cast_mock.assert_called_once_with(ctxt, 'release_fixed_ip', address=address) def test_set_network_host(self): network = fake_network.fake_network_obj(context.get_admin_context()) self._test_network_api('set_network_host', rpc_method='call', network_ref=network, version='1.15') def test_set_network_host_network_object_to_primitive(self): # Tests that the network object is converted to a primitive if it # can't send version 1.15. ctxt = context.RequestContext('fake_user', 'fake_project') network = fake_network.fake_network_obj(ctxt) network_dict = objects_base.obj_to_primitive(network) rpcapi = network_rpcapi.NetworkAPI() call_mock = mock.Mock() cctxt_mock = mock.Mock(call=call_mock) with test.nested( mock.patch.object(rpcapi.client, 'can_send_version', return_value=False), mock.patch.object(rpcapi.client, 'prepare', return_value=cctxt_mock) ) as ( can_send_mock, prepare_mock ): rpcapi.set_network_host(ctxt, network) # assert our mocks were called as expected can_send_mock.assert_called_once_with('1.15') prepare_mock.assert_called_once_with(version='1.0') call_mock.assert_called_once_with(ctxt, 'set_network_host', network_ref=network_dict) def test_rpc_setup_network_on_host(self): self._test_network_api('rpc_setup_network_on_host', rpc_method='call', network_id='fake_id', teardown=False, host='fake_host') def test_rpc_allocate_fixed_ip(self): self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call', instance_id='fake_id', network_id='fake_id', address='addr', vpn=True, host='fake_host') def test_deallocate_fixed_ip(self): instance = fake_instance.fake_db_instance() self._test_network_api('deallocate_fixed_ip', rpc_method='call', address='fake_addr', host='fake_host', instance=instance, version='1.12') def test_update_dns(self): self._test_network_api('update_dns', rpc_method='cast', fanout=True, network_ids='fake_id', version='1.3') def test__associate_floating_ip(self): self._test_network_api('_associate_floating_ip', rpc_method='call', floating_address='fake_addr', fixed_address='fixed_address', interface='fake_interface', host='fake_host', instance_uuid='fake_uuid', version='1.6') def test__disassociate_floating_ip(self): self._test_network_api('_disassociate_floating_ip', rpc_method='call',
address='fake_addr', interface='fake_interface', host='fake_host', instance_uuid='fake_uuid', version='1.6') def test_migrate_instance_start(self): self._test_network_api('migrate_instance_start', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host=self.DefaultArg(None), version='1.2') def test_migrate_instance_start_multi_host(self): self._test_network_api('migrate_instance_start', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host='fake_host', version='1.2') def test_migrate_instance_finish(self): self._test_network_api('migrate_instance_finish', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host=self.DefaultArg(None), version='1.2') def test_migrate_instance_finish_multi_host(self): self._test_network_api('migrate_instance_finish', rpc_method='call', instance_uuid='fake_instance_uuid', rxtx_factor='fake_factor', project_id='fake_project', source_compute='fake_src_compute', dest_compute='fake_dest_compute', floating_addresses='fake_floating_addresses', host='fake_host', version='1.2') nova-13.1.4/nova/tests/unit/test_safeutils.py0000664000567000056710000000476413064447140022416 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from nova import safe_utils from nova import test class WrappedCodeTestCase(test.NoDBTestCase): """Test the get_wrapped_function utility method.""" def _wrapper(self, function): @functools.wraps(function) def decorated_function(self, *args, **kwargs): function(self, *args, **kwargs) return decorated_function def test_single_wrapped(self): @self._wrapper def wrapped(self, instance, red=None, blue=None): pass func = safe_utils.get_wrapped_function(wrapped) func_code = func.__code__ self.assertEqual(4, len(func_code.co_varnames)) self.assertIn('self', func_code.co_varnames) self.assertIn('instance', func_code.co_varnames) self.assertIn('red', func_code.co_varnames) self.assertIn('blue', func_code.co_varnames) def test_double_wrapped(self): @self._wrapper @self._wrapper def wrapped(self, instance, red=None, blue=None): pass func = safe_utils.get_wrapped_function(wrapped) func_code = func.__code__ self.assertEqual(4, len(func_code.co_varnames)) self.assertIn('self', func_code.co_varnames) self.assertIn('instance', func_code.co_varnames) self.assertIn('red', func_code.co_varnames) self.assertIn('blue', func_code.co_varnames) def test_triple_wrapped(self): @self._wrapper @self._wrapper @self._wrapper def wrapped(self, instance, red=None, blue=None): pass func = safe_utils.get_wrapped_function(wrapped) func_code = func.__code__ self.assertEqual(4, len(func_code.co_varnames)) self.assertIn('self', func_code.co_varnames) self.assertIn('instance', func_code.co_varnames) self.assertIn('red', func_code.co_varnames) self.assertIn('blue', func_code.co_varnames) nova-13.1.4/nova/tests/unit/test_baserpc.py0000664000567000056710000000312313064447152022025 0ustar jenkinsjenkins00000000000000# # Copyright 2013 - Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Test the base rpc API. """ from oslo_config import cfg from nova import baserpc from nova import context from nova import test CONF = cfg.CONF class BaseAPITestCase(test.TestCase): def setUp(self): super(BaseAPITestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) self.conductor = self.start_service( 'conductor', manager=CONF.conductor.manager) self.compute = self.start_service('compute') self.base_rpcapi = baserpc.BaseAPI(CONF.compute_topic) def test_ping(self): res = self.base_rpcapi.ping(self.context, 'foo') self.assertEqual({'service': 'compute', 'arg': 'foo'}, res) def test_get_backdoor_port(self): res = self.base_rpcapi.get_backdoor_port(self.context, self.compute.host) self.assertEqual(self.compute.backdoor_port, res) nova-13.1.4/nova/tests/unit/fake_pci_device_pools.py0000664000567000056710000000266113064447140023646 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.objects import pci_device_pool # This represents the format that PCI device pool info was stored in the DB # before this info was made into objects. fake_pool_dict = { 'product_id': 'fake-product', 'vendor_id': 'fake-vendor', 'numa_node': 1, 't1': 'v1', 't2': 'v2', 'count': 2, } fake_pool = pci_device_pool.PciDevicePool(count=5, product_id='foo', vendor_id='bar', numa_node=0, tags={'t1': 'v1', 't2': 'v2'}) fake_pool_primitive = fake_pool.obj_to_primitive() fake_pool_list = pci_device_pool.PciDevicePoolList(objects=[fake_pool]) fake_pool_list_primitive = fake_pool_list.obj_to_primitive() nova-13.1.4/nova/tests/unit/servicegroup/0000775000567000056710000000000013064447471021517 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/servicegroup/test_db_servicegroup.py0000664000567000056710000001160013064447152026304 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_db import exception as db_exception import oslo_messaging as messaging from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from nova import objects from nova import servicegroup from nova import test class DBServiceGroupTestCase(test.NoDBTestCase): def setUp(self): super(DBServiceGroupTestCase, self).setUp() self.down_time = 15 self.flags(service_down_time=self.down_time, servicegroup_driver='db') self.servicegroup_api = servicegroup.API() def test_is_up(self): now = timeutils.utcnow() service = objects.Service( host='fake-host', topic='compute', binary='nova-compute', created_at=now, updated_at=now, last_seen_up=now, forced_down=False, ) time_fixture = self.useFixture(utils_fixture.TimeFixture(now)) # Up (equal) result = self.servicegroup_api.service_is_up(service) self.assertTrue(result) # Up time_fixture.advance_time_seconds(self.down_time) result = self.servicegroup_api.service_is_up(service) self.assertTrue(result) # Down time_fixture.advance_time_seconds(1) result = self.servicegroup_api.service_is_up(service) self.assertFalse(result) # "last_seen_up" says down, "updated_at" says up. # This can happen if we do a service disable/enable while it's down. 
service.updated_at = timeutils.utcnow() result = self.servicegroup_api.service_is_up(service) self.assertFalse(result) def test_join(self): service = mock.MagicMock(report_interval=1) self.servicegroup_api.join('fake-host', 'fake-topic', service) fn = self.servicegroup_api._driver._report_state service.tg.add_timer.assert_called_once_with(1, fn, 5, service) @mock.patch.object(objects.Service, 'save') def test_report_state(self, upd_mock): service_ref = objects.Service(host='fake-host', topic='compute', report_count=10) service = mock.MagicMock(model_disconnected=False, service_ref=service_ref) fn = self.servicegroup_api._driver._report_state fn(service) upd_mock.assert_called_once_with() self.assertEqual(11, service_ref.report_count) self.assertFalse(service.model_disconnected) @mock.patch.object(objects.Service, 'save') def _test_report_state_error(self, exc_cls, upd_mock): upd_mock.side_effect = exc_cls("service save failed") service_ref = objects.Service(host='fake-host', topic='compute', report_count=10) service = mock.MagicMock(model_disconnected=False, service_ref=service_ref) fn = self.servicegroup_api._driver._report_state fn(service) # fail if exception not caught self.assertTrue(service.model_disconnected) def test_report_state_remote_error_handling(self): # test error handling using remote conductor self.flags(use_local=False, group='conductor') self._test_report_state_error(messaging.RemoteError) def test_report_state_remote_error_handling_timeout(self): # test error handling using remote conductor self.flags(use_local=False, group='conductor') self._test_report_state_error(messaging.MessagingTimeout) def test_report_state_remote_unexpected_error(self): # unexpected errors must be handled, but disconnected flag not touched self.flags(use_local=False, group='conductor') self._test_report_state_error(RuntimeError) def test_report_state_local_error_handling(self): # if using local conductor, the db driver must handle DB errors self.flags(use_local=True, group='conductor') # mock an oslo.db DBError as it's an exception base class for # oslo.db DB errors (eg DBConnectionError) self._test_report_state_error(db_exception.DBError) def test_report_state_local_unexpected_error(self): # unexpected errors must be handled, but disconnected flag not touched self.flags(use_local=True, group='conductor') self._test_report_state_error(RuntimeError) nova-13.1.4/nova/tests/unit/servicegroup/test_api.py0000664000567000056710000000374513064447152023706 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Test the base class for the servicegroup API """ import mock from nova import servicegroup from nova import test class ServiceGroupApiTestCase(test.NoDBTestCase): def setUp(self): super(ServiceGroupApiTestCase, self).setUp() self.flags(servicegroup_driver='db') self.servicegroup_api = servicegroup.API() self.driver = self.servicegroup_api._driver def test_join(self): """""" member = {'host': "fake-host", "topic": "compute"} group = "group" self.driver.join = mock.MagicMock(return_value=None) result = self.servicegroup_api.join(member, group) self.assertIsNone(result) self.driver.join.assert_called_with(member, group, None) def test_service_is_up(self): """""" member = {"host": "fake-host", "topic": "compute", "forced_down": False} for retval in (True, False): driver = self.servicegroup_api._driver driver.is_up = mock.MagicMock(return_value=retval) result = self.servicegroup_api.service_is_up(member) self.assertIs(result, retval) driver.is_up.assert_called_with(member) member["forced_down"] = True for retval in (True, False): driver = self.servicegroup_api._driver result = self.servicegroup_api.service_is_up(member) self.assertIs(result, False) nova-13.1.4/nova/tests/unit/servicegroup/__init__.py0000664000567000056710000000000013064447140023607 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/servicegroup/test_mc_servicegroup.py0000664000567000056710000000472213064447152026325 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Akira Yoshiyama # # This is derived from test_db_servicegroup.py. # Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import servicegroup from nova import test class MemcachedServiceGroupTestCase(test.NoDBTestCase): @mock.patch('nova.cache_utils.get_memcached_client') def setUp(self, mgc_mock): super(MemcachedServiceGroupTestCase, self).setUp() self.mc_client = mock.MagicMock() mgc_mock.return_value = self.mc_client self.flags(memcached_servers='ignored', servicegroup_driver='mc') self.servicegroup_api = servicegroup.API() def test_is_up(self): service_ref = { 'host': 'fake-host', 'topic': 'compute' } self.mc_client.get.return_value = None self.assertFalse(self.servicegroup_api.service_is_up(service_ref)) self.mc_client.get.assert_called_once_with('compute:fake-host') self.mc_client.reset_mock() self.mc_client.get.return_value = True self.assertTrue(self.servicegroup_api.service_is_up(service_ref)) self.mc_client.get.assert_called_once_with('compute:fake-host') def test_join(self): service = mock.MagicMock(report_interval=1) self.servicegroup_api.join('fake-host', 'fake-topic', service) fn = self.servicegroup_api._driver._report_state service.tg.add_timer.assert_called_once_with(1, fn, 5, service) def test_report_state(self): service_ref = { 'host': 'fake-host', 'topic': 'compute' } service = mock.MagicMock(model_disconnected=False, service_ref=service_ref) fn = self.servicegroup_api._driver._report_state fn(service) self.mc_client.set.assert_called_once_with('compute:fake-host', mock.ANY) nova-13.1.4/nova/tests/unit/fake_volume.py0000664000567000056710000002442313064447152021652 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Implementation of a fake volume API.""" import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from nova import exception LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('cross_az_attach', 'nova.volume.cinder', group='cinder') class fake_volume(object): user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66' instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66' def __init__(self, size, name, description, volume_id, snapshot, volume_type, metadata, availability_zone): snapshot_id = None if snapshot is not None: snapshot_id = snapshot['id'] if volume_id is None: volume_id = str(uuid.uuid4()) self.vol = { 'created_at': timeutils.utcnow(), 'deleted_at': None, 'updated_at': timeutils.utcnow(), 'uuid': 'WTF', 'deleted': False, 'id': volume_id, 'user_id': self.user_uuid, 'project_id': 'fake-project-id', 'snapshot_id': snapshot_id, 'host': None, 'size': size, 'availability_zone': availability_zone, 'instance_uuid': None, 'mountpoint': None, 'attach_time': timeutils.utcnow(), 'status': 'available', 'attach_status': 'detached', 'scheduled_at': None, 'launched_at': None, 'terminated_at': None, 'display_name': name, 'display_description': description, 'provider_location': 'fake-location', 'provider_auth': 'fake-auth', 'volume_type_id': 99, 'multiattach': False } def get(self, key, default=None): return self.vol[key] def __setitem__(self, key, value): self.vol[key] = value def __getitem__(self, key): return self.vol[key] class fake_snapshot(object): user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66' instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66' def __init__(self, volume_id, size, name, desc, id=None): if id is None: id = str(uuid.uuid4()) self.snap = { 'created_at': timeutils.utcnow(), 'deleted_at': None, 'updated_at': timeutils.utcnow(), 'uuid': 'WTF', 'deleted': False, 'id': str(id), 'volume_id': volume_id, 'status': 'available', 'progress': '100%', 'volume_size': 1, 'display_name': name, 'display_description': desc, 'user_id': self.user_uuid, 'project_id': 'fake-project-id' } def get(self, key, default=None): return self.snap[key] def __setitem__(self, key, value): self.snap[key] = value def __getitem__(self, key): return self.snap[key] class API(object): volume_list = [] snapshot_list = [] _instance = None class Singleton(object): def __init__(self): self.API = None def __init__(self): if API._instance is None: API._instance = API.Singleton() self._EventHandler_instance = API._instance def create(self, context, size, name, description, snapshot=None, volume_type=None, metadata=None, availability_zone=None): v = fake_volume(size, name, description, None, snapshot, volume_type, metadata, availability_zone) self.volume_list.append(v.vol) LOG.info('creating volume %s', v.vol['id']) return v.vol def create_with_kwargs(self, context, **kwargs): volume_id = kwargs.get('volume_id', None) v = fake_volume(kwargs['size'], kwargs['name'], kwargs['description'], str(volume_id), None, None, None, None) if kwargs.get('status', None) is not None: v.vol['status'] = kwargs['status'] if kwargs['host'] is not None: v.vol['host'] = kwargs['host'] if kwargs['attach_status'] is not None: v.vol['attach_status'] = kwargs['attach_status'] if kwargs.get('snapshot_id', None) is not None: v.vol['snapshot_id'] = kwargs['snapshot_id'] self.volume_list.append(v.vol) return v.vol def get(self, context, volume_id): if str(volume_id) == '87654321': return {'id': volume_id, 'attach_time': '13:56:24', 'attach_status': 'attached', 'status': 'in-use'} for v 
in self.volume_list: if v['id'] == str(volume_id): return v raise exception.VolumeNotFound(volume_id=volume_id) def get_all(self, context): return self.volume_list def delete(self, context, volume_id): LOG.info('deleting volume %s', volume_id) self.volume_list = [v for v in self.volume_list if v['id'] != volume_id] def check_attach(self, context, volume, instance=None): if volume['status'] != 'available': msg = "Status of volume '%s' must be available" % volume raise exception.InvalidVolume(reason=msg) if volume['attach_status'] == 'attached': msg = "already attached" raise exception.InvalidVolume(reason=msg) if instance and not CONF.cinder.cross_az_attach: if instance['availability_zone'] != volume['availability_zone']: msg = "Instance and volume not in same availability_zone" raise exception.InvalidVolume(reason=msg) def check_detach(self, context, volume, instance=None): if volume['status'] == "available": msg = "already detached" raise exception.InvalidVolume(reason=msg) if volume['attach_status'] == 'detached': msg = "Volume must be attached in order to detach." raise exception.InvalidVolume(reason=msg) if instance and not volume.get('attachments', {}).get(instance.uuid): raise exception.VolumeUnattached(volume_id=volume['id']) def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'): LOG.info('attaching volume %s', volume_id) volume = self.get(context, volume_id) volume['status'] = 'in-use' volume['attach_status'] = 'attached' volume['attach_time'] = timeutils.utcnow() volume['multiattach'] = True volume['attachments'] = {instance_uuid: {'attachment_id': str(uuid.uuid4()), 'mountpoint': mountpoint}} def reset_fake_api(self, context): del self.volume_list[:] del self.snapshot_list[:] def detach(self, context, volume_id, instance_uuid, attachment_id=None): LOG.info('detaching volume %s', volume_id) volume = self.get(context, volume_id) volume['status'] = 'available' volume['attach_status'] = 'detached' def initialize_connection(self, context, volume_id, connector): return {'driver_volume_type': 'iscsi', 'data': {}} def terminate_connection(self, context, volume_id, connector): return None def get_snapshot(self, context, snapshot_id): for snap in self.snapshot_list: if snap['id'] == str(snapshot_id): return snap def get_all_snapshots(self, context): return self.snapshot_list def create_snapshot(self, context, volume_id, name, description, id=None): volume = self.get(context, volume_id) snapshot = fake_snapshot(volume['id'], volume['size'], name, description, id) self.snapshot_list.append(snapshot.snap) return snapshot.snap def create_snapshot_with_kwargs(self, context, **kwargs): snapshot = fake_snapshot(kwargs.get('volume_id'), kwargs.get('volume_size'), kwargs.get('name'), kwargs.get('description'), kwargs.get('snap_id')) status = kwargs.get('status', None) snapshot.snap['status'] = status self.snapshot_list.append(snapshot.snap) return snapshot.snap def create_snapshot_force(self, context, volume_id, name, description, id=None): volume = self.get(context, volume_id) snapshot = fake_snapshot(volume['id'], volume['size'], name, description, id) self.snapshot_list.append(snapshot.snap) return snapshot.snap def delete_snapshot(self, context, snapshot_id): self.snapshot_list = [s for s in self.snapshot_list if s['id'] != snapshot_id] def reserve_volume(self, context, volume_id): LOG.info('reserving volume %s', volume_id) volume = self.get(context, volume_id) volume['status'] = 'attaching' def unreserve_volume(self, context, volume_id): LOG.info('unreserving volume %s', 
volume_id) volume = self.get(context, volume_id) volume['status'] = 'available' def begin_detaching(self, context, volume_id): LOG.info('begin detaching volume %s', volume_id) volume = self.get(context, volume_id) volume['status'] = 'detaching' def roll_detaching(self, context, volume_id): LOG.info('roll detaching volume %s', volume_id) volume = self.get(context, volume_id) volume['status'] = 'in-use' nova-13.1.4/nova/tests/unit/test_policy.py0000664000567000056710000007070613064447152021710 0ustar jenkinsjenkins00000000000000# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test of Policy Engine For Nova.""" import os.path from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils import requests_mock from nova import context from nova import exception from nova import policy from nova import test from nova.tests.unit import fake_policy from nova.tests.unit import policy_fixture from nova import utils class PolicyFileTestCase(test.NoDBTestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.target = {} def test_modified_policy_reloads(self): with utils.tempdir() as tmpdir: tmpfilename = os.path.join(tmpdir, 'policy') self.flags(policy_file=tmpfilename, group='oslo_policy') # NOTE(uni): context construction invokes policy check to determine # is_admin or not. As a side-effect, policy reset is needed here # to flush existing policy cache. policy.reset() action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write('{"example:test": ""}') policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write('{"example:test": "!"}') policy._ENFORCER.load_rules(True) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class PolicyTestCase(test.NoDBTestCase): def setUp(self): super(PolicyTestCase, self).setUp() rules = { "true": '@', "example:allowed": '@', "example:denied": "!", "example:get_http": "http://www.example.com", "example:my_file": "role:compute_admin or " "project_id:%(project_id)s", "example:early_and_fail": "!
and @", "example:early_or_success": "@ or !", "example:lowercase_admin": "role:admin or role:sysadmin", "example:uppercase_admin": "role:ADMIN or role:sysadmin", } policy.reset() policy.init() policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.context = context.RequestContext('fake', 'fake', roles=['member']) self.target = {} def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_noraise(self): action = "example:denied" result = policy.enforce(self.context, action, self.target, False) self.assertFalse(result) def test_enforce_good_action(self): action = "example:allowed" result = policy.enforce(self.context, action, self.target) self.assertTrue(result) @requests_mock.mock() def test_enforce_http_true(self, req_mock): req_mock.post('http://www.example.com/', text='True') action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertTrue(result) @requests_mock.mock() def test_enforce_http_false(self, req_mock): req_mock.post('http://www.example.com/', text='False') action = "example:get_http" target = {} self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.RequestContext('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) class DefaultPolicyTestCase(test.NoDBTestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() self.rules = { "default": '', "example:exist": "!", } self._set_rules('default') self.context = context.RequestContext('fake', 'fake') def _set_rules(self, default_rule): policy.reset() rules = oslo_policy.Rules.from_dict(self.rules) policy.init(rules=rules, default_rule=default_rule, use_conf=False) def test_policy_called(self): self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) def test_default_not_found(self): self._set_rules("default_noexist") self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:noexist", {}) class IsAdminCheckTestCase(test.NoDBTestCase): def setUp(self): super(IsAdminCheckTestCase, self).setUp() policy.init() def test_init_true(self): check = policy.IsAdminCheck('is_admin', 'True') self.assertEqual(check.kind, 'is_admin') 
self.assertEqual(check.match, 'True') self.assertTrue(check.expected) def test_init_false(self): check = policy.IsAdminCheck('is_admin', 'nottrue') self.assertEqual(check.kind, 'is_admin') self.assertEqual(check.match, 'False') self.assertFalse(check.expected) def test_call_true(self): check = policy.IsAdminCheck('is_admin', 'True') self.assertEqual(check('target', dict(is_admin=True), policy._ENFORCER), True) self.assertEqual(check('target', dict(is_admin=False), policy._ENFORCER), False) def test_call_false(self): check = policy.IsAdminCheck('is_admin', 'False') self.assertEqual(check('target', dict(is_admin=True), policy._ENFORCER), False) self.assertEqual(check('target', dict(is_admin=False), policy._ENFORCER), True) class AdminRolePolicyTestCase(test.NoDBTestCase): def setUp(self): super(AdminRolePolicyTestCase, self).setUp() self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture()) self.context = context.RequestContext('fake', 'fake', roles=['member']) self.actions = policy.get_rules().keys() self.target = {} def test_enforce_admin_actions_with_nonadmin_context_throws(self): """Check if non-admin context passed to admin actions throws Policy not authorized exception """ for action in self.actions: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class RealRolePolicyTestCase(test.NoDBTestCase): def setUp(self): super(RealRolePolicyTestCase, self).setUp() self.policy = self.useFixture(policy_fixture.RealPolicyFixture()) self.non_admin_context = context.RequestContext('fake', 'fake', roles=['member']) self.admin_context = context.RequestContext('fake', 'fake', True, roles=['member']) self.target = {} self.fake_policy = jsonutils.loads(fake_policy.policy_data) self.admin_only_rules = ( "cells_scheduler_filter:TargetCellFilter", "compute:unlock_override", "compute:get_all_tenants", "compute:create:forced_host", "compute_extension:accounts", "compute_extension:admin_actions", "compute_extension:admin_actions:resetNetwork", "compute_extension:admin_actions:injectNetworkInfo", "compute_extension:admin_actions:migrateLive", "compute_extension:admin_actions:resetState", "compute_extension:admin_actions:migrate", "compute_extension:aggregates", "compute_extension:agents", "compute_extension:baremetal_nodes", "compute_extension:cells", "compute_extension:cells:create", "compute_extension:cells:delete", "compute_extension:cells:update", "compute_extension:cells:sync_instances", "compute_extension:cloudpipe", "compute_extension:cloudpipe_update", "compute_extension:evacuate", "compute_extension:extended_server_attributes", "compute_extension:fixed_ips", "compute_extension:flavor_access:addTenantAccess", "compute_extension:flavor_access:removeTenantAccess", "compute_extension:flavorextraspecs:create", "compute_extension:flavorextraspecs:update", "compute_extension:flavorextraspecs:delete", "compute_extension:flavormanage", "compute_extension:floating_ips_bulk", "compute_extension:fping:all_tenants", "compute_extension:hosts", "compute_extension:hypervisors", "compute_extension:instance_actions:events", "compute_extension:instance_usage_audit_log", "compute_extension:networks", "compute_extension:networks_associate", "compute_extension:quotas:update", "compute_extension:quotas:delete", "compute_extension:security_group_default_rules", "compute_extension:server_diagnostics", "compute_extension:services", "compute_extension:shelveOffload", "compute_extension:simple_tenant_usage:list", "compute_extension:users", 
"compute_extension:availability_zone:detail", "compute_extension:used_limits_for_admin", "compute_extension:migrations:index", "compute_extension:os-assisted-volume-snapshots:create", "compute_extension:os-assisted-volume-snapshots:delete", "compute_extension:console_auth_tokens", "compute_extension:os-server-external-events:create", "os_compute_api:servers:create:forced_host", "os_compute_api:servers:detail:get_all_tenants", "os_compute_api:servers:index:get_all_tenants", "os_compute_api:servers:show:host_status", "os_compute_api:servers:migrations:force_complete", "os_compute_api:servers:migrations:delete", "network:attach_external_network", "os_compute_api:os-admin-actions", "os_compute_api:os-admin-actions:reset_network", "os_compute_api:os-admin-actions:inject_network_info", "os_compute_api:os-admin-actions:reset_state", "os_compute_api:os-aggregates:index", "os_compute_api:os-aggregates:create", "os_compute_api:os-aggregates:show", "os_compute_api:os-aggregates:update", "os_compute_api:os-aggregates:delete", "os_compute_api:os-aggregates:add_host", "os_compute_api:os-aggregates:remove_host", "os_compute_api:os-aggregates:set_metadata", "os_compute_api:os-agents", "os_compute_api:os-baremetal-nodes", "os_compute_api:os-cells", "os_compute_api:os-cells:create", "os_compute_api:os-cells:delete", "os_compute_api:os-cells:update", "os_compute_api:os-cells:sync_instances", "os_compute_api:os-cloudpipe", "os_compute_api:os-evacuate", "os_compute_api:os-extended-server-attributes", "os_compute_api:os-fixed-ips", "os_compute_api:os-flavor-access:remove_tenant_access", "os_compute_api:os-flavor-access:add_tenant_access", "os_compute_api:os-flavor-extra-specs:create", "os_compute_api:os-flavor-extra-specs:update", "os_compute_api:os-flavor-extra-specs:delete", "os_compute_api:os-flavor-manage", "os_compute_api:os-floating-ips-bulk", "os_compute_api:os-floating-ip-dns:domain:delete", "os_compute_api:os-floating-ip-dns:domain:update", "os_compute_api:os-fping:all_tenants", "os_compute_api:os-hosts", "os_compute_api:os-hypervisors", "os_compute_api:os-instance-actions:events", "os_compute_api:os-instance-usage-audit-log", "os_compute_api:os-lock-server:unlock:unlock_override", "os_compute_api:os-migrate-server:migrate", "os_compute_api:os-migrate-server:migrate_live", "os_compute_api:os-networks", "os_compute_api:os-networks-associate", "os_compute_api:os-pci:index", "os_compute_api:os-pci:detail", "os_compute_api:os-pci:show", "os_compute_api:os-quota-sets:update", "os_compute_api:os-quota-sets:delete", "os_compute_api:os-quota-sets:detail", "os_compute_api:os-security-group-default-rules", "os_compute_api:os-server-diagnostics", "os_compute_api:os-services", "os_compute_api:os-shelve:shelve_offload", "os_compute_api:os-simple-tenant-usage:list", "os_compute_api:os-availability-zone:detail", "os_compute_api:os-used-limits", "os_compute_api:os-migrations:index", "os_compute_api:os-assisted-volume-snapshots:create", "os_compute_api:os-assisted-volume-snapshots:delete", "os_compute_api:os-console-auth-tokens", "os_compute_api:os-quota-class-sets:update", "os_compute_api:os-server-external-events:create", "os_compute_api:servers:migrations:index", "os_compute_api:servers:migrations:show", ) self.admin_or_owner_rules = ( "default", "compute:start", "compute:stop", "compute:delete", "compute:soft_delete", "compute:force_delete", "compute:lock", "compute:unlock", "compute_extension:admin_actions:pause", "compute_extension:admin_actions:unpause", "compute_extension:admin_actions:suspend", 
"compute_extension:admin_actions:resume", "compute_extension:admin_actions:lock", "compute_extension:admin_actions:unlock", "compute_extension:admin_actions:createBackup", "compute_extension:simple_tenant_usage:show", "os_compute_api:servers:start", "os_compute_api:servers:stop", "os_compute_api:servers:trigger_crash_dump", "os_compute_api:os-create-backup", "os_compute_api:ips:index", "os_compute_api:ips:show", "os_compute_api:os-keypairs:create", "os_compute_api:os-keypairs:delete", "os_compute_api:os-keypairs:index", "os_compute_api:os-keypairs:show", "os_compute_api:os-lock-server:lock", "os_compute_api:os-lock-server:unlock", "os_compute_api:os-pause-server:pause", "os_compute_api:os-pause-server:unpause", "os_compute_api:os-quota-sets:show", "os_compute_api:server-metadata:index", "os_compute_api:server-metadata:show", "os_compute_api:server-metadata:delete", "os_compute_api:server-metadata:create", "os_compute_api:server-metadata:update", "os_compute_api:server-metadata:update_all", "os_compute_api:os-simple-tenant-usage:show", "os_compute_api:os-suspend-server:suspend", "os_compute_api:os-suspend-server:resume", "os_compute_api:os-tenant-networks", "compute:create", "compute:create:attach_network", "compute:create:attach_volume", "compute:get_all_instance_metadata", "compute:get_all_instance_system_metadata", "compute:get_console_output", "compute:get_diagnostics", "compute:delete_instance_metadata", "compute:get", "compute:get_all", "compute:shelve", "compute:shelve_offload", "compute:snapshot_volume_backed", "compute:unshelve", "compute:resize", "compute:confirm_resize", "compute:revert_resize", "compute:rebuild", "compute:reboot", "compute:volume_snapshot_create", "compute:volume_snapshot_delete", "compute:add_fixed_ip", "compute:attach_interface", "compute:detach_interface", "compute:attach_volume", "compute:detach_volume", "compute:backup", "compute:get_instance_diagnostics", "compute:get_instance_metadata", "compute:get_mks_console", "compute:get_rdp_console", "compute:get_serial_console", "compute:get_spice_console", "compute:get_vnc_console", "compute:inject_network_info", "compute:pause", "compute:remove_fixed_ip", "compute:rescue", "compute:reset_network", "compute:restore", "compute:resume", "compute:security_groups:add_to_instance", "compute:security_groups:remove_from_instance", "compute:set_admin_password", "compute:snapshot", "compute:suspend", "compute:swap_volume", "compute:unpause", "compute:unrescue", "compute:update", "compute:update_instance_metadata", "compute_extension:config_drive", "compute_extension:os-tenant-networks", "network:get_vif_by_mac_address", "os_compute_api:extensions", "os_compute_api:os-config-drive", "os_compute_api:servers:confirm_resize", "os_compute_api:servers:create", "os_compute_api:servers:create:attach_network", "os_compute_api:servers:create:attach_volume", "os_compute_api:servers:create_image", "os_compute_api:servers:delete", "os_compute_api:servers:detail", "os_compute_api:servers:index", "os_compute_api:servers:reboot", "os_compute_api:servers:rebuild", "os_compute_api:servers:resize", "os_compute_api:servers:revert_resize", "os_compute_api:servers:show", "os_compute_api:servers:update", "compute_extension:attach_interfaces", "compute_extension:certificates", "compute_extension:console_output", "compute_extension:consoles", "compute_extension:createserverext", "compute_extension:deferred_delete", "compute_extension:disk_config", "compute_extension:extended_status", "compute_extension:extended_availability_zone", 
"compute_extension:extended_ips", "compute_extension:extended_ips_mac", "compute_extension:extended_vif_net", "compute_extension:extended_volumes", "compute_extension:flavor_access", "compute_extension:flavor_disabled", "compute_extension:flavor_rxtx", "compute_extension:flavor_swap", "compute_extension:flavorextradata", "compute_extension:flavorextraspecs:index", "compute_extension:flavorextraspecs:show", "compute_extension:floating_ip_dns", "compute_extension:floating_ip_pools", "compute_extension:floating_ips", "compute_extension:fping", "compute_extension:image_size", "compute_extension:instance_actions", "compute_extension:keypairs", "compute_extension:keypairs:index", "compute_extension:keypairs:show", "compute_extension:keypairs:create", "compute_extension:keypairs:delete", "compute_extension:multinic", "compute_extension:networks:view", "compute_extension:quotas:show", "compute_extension:quota_classes", "compute_extension:rescue", "compute_extension:security_groups", "compute_extension:server_groups", "compute_extension:server_password", "compute_extension:server_usage", "compute_extension:shelve", "compute_extension:unshelve", "compute_extension:virtual_interfaces", "compute_extension:virtual_storage_arrays", "compute_extension:volumes", "compute_extension:volume_attachments:index", "compute_extension:volume_attachments:show", "compute_extension:volume_attachments:create", "compute_extension:volume_attachments:update", "compute_extension:volume_attachments:delete", "compute_extension:volumetypes", "compute_extension:availability_zone:list", "network:get_all", "network:get", "network:create", "network:delete", "network:associate", "network:disassociate", "network:get_vifs_by_instance", "network:allocate_for_instance", "network:deallocate_for_instance", "network:validate_networks", "network:get_instance_uuids_by_ip_filter", "network:get_instance_id_by_floating_address", "network:setup_networks_on_host", "network:get_backdoor_port", "network:get_floating_ip", "network:get_floating_ip_pools", "network:get_floating_ip_by_address", "network:get_floating_ips_by_project", "network:get_floating_ips_by_fixed_address", "network:allocate_floating_ip", "network:associate_floating_ip", "network:disassociate_floating_ip", "network:release_floating_ip", "network:migrate_instance_start", "network:migrate_instance_finish", "network:get_fixed_ip", "network:get_fixed_ip_by_address", "network:add_fixed_ip_to_instance", "network:remove_fixed_ip_from_instance", "network:add_network_to_project", "network:get_instance_nw_info", "network:get_dns_domains", "network:add_dns_entry", "network:modify_dns_entry", "network:delete_dns_entry", "network:get_dns_entries_by_address", "network:get_dns_entries_by_name", "network:create_private_dns_domain", "network:create_public_dns_domain", "network:delete_dns_domain", "os_compute_api:servers:create_image:allow_volume_backed", "os_compute_api:os-access-ips", "os_compute_api:os-admin-password", "os_compute_api:os-attach-interfaces", "os_compute_api:os-certificates:create", "os_compute_api:os-certificates:show", "os_compute_api:os-consoles:create", "os_compute_api:os-consoles:delete", "os_compute_api:os-consoles:index", "os_compute_api:os-consoles:show", "os_compute_api:os-console-output", "os_compute_api:os-remote-consoles", "os_compute_api:os-deferred-delete", "os_compute_api:os-disk-config", "os_compute_api:os-extended-status", "os_compute_api:os-extended-availability-zone", "os_compute_api:os-extended-volumes", "os_compute_api:os-flavor-access", 
"os_compute_api:os-flavor-rxtx", "os_compute_api:flavors", "os_compute_api:os-flavor-extra-specs:index", "os_compute_api:os-flavor-extra-specs:show", "os_compute_api:os-floating-ip-dns", "os_compute_api:os-floating-ip-pools", "os_compute_api:os-floating-ips", "os_compute_api:os-fping", "os_compute_api:image-size", "os_compute_api:os-instance-actions", "os_compute_api:os-keypairs", "os_compute_api:limits", "os_compute_api:os-multinic", "os_compute_api:os-networks:view", "os_compute_api:os-pci:pci_servers", "os_compute_api:os-rescue", "os_compute_api:os-security-groups", "os_compute_api:os-server-password", "os_compute_api:os-server-usage", "os_compute_api:os-server-groups", "os_compute_api:os-shelve:shelve", "os_compute_api:os-shelve:unshelve", "os_compute_api:os-virtual-interfaces", "os_compute_api:os-volumes", "os_compute_api:os-volumes-attachments:index", "os_compute_api:os-volumes-attachments:show", "os_compute_api:os-volumes-attachments:create", "os_compute_api:os-volumes-attachments:update", "os_compute_api:os-volumes-attachments:delete", "os_compute_api:os-availability-zone:list", ) self.non_admin_only_rules = ( "compute_extension:hide_server_addresses", "os_compute_api:os-hide-server-addresses") self.allow_all_rules = ( "os_compute_api:os-quota-sets:defaults", "os_compute_api:extensions:discoverable", "os_compute_api:os-access-ips:discoverable", "os_compute_api:os-admin-actions:discoverable", "os_compute_api:os-admin-password:discoverable", "os_compute_api:os-aggregates:discoverable", "os_compute_api:os-agents:discoverable", "os_compute_api:os-attach-interfaces:discoverable", "os_compute_api:os-baremetal-nodes:discoverable", "os_compute_api:os-block-device-mapping-v1:discoverable", "os_compute_api:os-cells:discoverable", "os_compute_api:os-certificates:discoverable", "os_compute_api:os-cloudpipe:discoverable", "os_compute_api:os-consoles:discoverable", "os_compute_api:os-console-output:discoverable", "os_compute_api:os-remote-consoles:discoverable", "os_compute_api:os-create-backup:discoverable", "os_compute_api:os-deferred-delete:discoverable", "os_compute_api:os-disk-config:discoverable", "os_compute_api:os-evacuate:discoverable", "os_compute_api:os-extended-server-attributes:discoverable", "os_compute_api:os-extended-status:discoverable", "os_compute_api:os-extended-availability-zone:discoverable", "os_compute_api:extension_info:discoverable", "os_compute_api:os-extended-volumes:discoverable", "os_compute_api:os-fixed-ips:discoverable", "os_compute_api:os-flavor-access:discoverable", "os_compute_api:os-flavor-rxtx:discoverable", "os_compute_api:flavors:discoverable", "os_compute_api:os-flavor-extra-specs:discoverable", "os_compute_api:os-flavor-manage:discoverable", "os_compute_api:os-floating-ip-dns:discoverable", "os_compute_api:os-floating-ip-pools:discoverable", "os_compute_api:os-floating-ips:discoverable", "os_compute_api:os-floating-ips-bulk:discoverable", "os_compute_api:os-fping:discoverable", "os_compute_api:os-hide-server-addresses:discoverable", "os_compute_api:os-hosts:discoverable", "os_compute_api:os-hypervisors:discoverable", "os_compute_api:images:discoverable", "os_compute_api:image-size:discoverable", "os_compute_api:os-instance-actions:discoverable", "os_compute_api:os-instance-usage-audit-log:discoverable", "os_compute_api:ips:discoverable", "os_compute_api:os-keypairs:discoverable", "os_compute_api:limits:discoverable", "os_compute_api:os-lock-server:discoverable", "os_compute_api:os-migrate-server:discoverable", "os_compute_api:os-multinic:discoverable", 
"os_compute_api:os-networks:discoverable", "os_compute_api:os-networks-associate:discoverable", "os_compute_api:os-pause-server:discoverable", "os_compute_api:os-pci:discoverable", "os_compute_api:os-personality:discoverable", "os_compute_api:os-preserve-ephemeral-rebuild:discoverable", "os_compute_api:os-quota-sets:discoverable", "os_compute_api:os-quota-class-sets:discoverable", "os_compute_api:os-rescue:discoverable", "os_compute_api:os-scheduler-hints:discoverable", "os_compute_api:os-security-group-default-rules:discoverable", "os_compute_api:os-security-groups:discoverable", "os_compute_api:os-server-diagnostics:discoverable", "os_compute_api:os-server-password:discoverable", "os_compute_api:os-server-usage:discoverable", "os_compute_api:os-server-groups:discoverable", "os_compute_api:os-services:discoverable", "os_compute_api:server-metadata:discoverable", "os_compute_api:servers:discoverable", "os_compute_api:os-shelve:shelve:discoverable", "os_compute_api:os-simple-tenant-usage:discoverable", "os_compute_api:os-suspend-server:discoverable", "os_compute_api:os-tenant-networks:discoverable", "os_compute_api:os-user-data:discoverable", "os_compute_api:os-virtual-interfaces:discoverable", "os_compute_api:os-volumes:discoverable", "os_compute_api:os-volumes-attachments:discoverable", "os_compute_api:os-availability-zone:discoverable", "os_compute_api:os-used-limits:discoverable", "os_compute_api:os-migrations:discoverable", "os_compute_api:os-assisted-volume-snapshots:discoverable", ) def test_all_rules_in_sample_file(self): special_rules = ["context_is_admin", "admin_or_owner", "default"] for (name, rule) in self.fake_policy.items(): if name in special_rules: continue self.assertIn(name, policy.get_rules()) def test_admin_only_rules(self): for rule in self.admin_only_rules: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.non_admin_context, rule, self.target) policy.enforce(self.admin_context, rule, self.target) def test_non_admin_only_rules(self): for rule in self.non_admin_only_rules: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.admin_context, rule, self.target) policy.enforce(self.non_admin_context, rule, self.target) def test_admin_or_owner_rules(self): for rule in self.admin_or_owner_rules: self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.non_admin_context, rule, self.target) policy.enforce(self.non_admin_context, rule, {'project_id': 'fake', 'user_id': 'fake'}) def test_no_empty_rules(self): rules = policy.get_rules() for rule in rules: self.assertNotEqual('', str(rule), '%s should not be empty, use "@" instead if the policy ' 'should allow everything' % rule) def test_allow_all_rules(self): for rule in self.allow_all_rules: policy.enforce(self.non_admin_context, rule, self.target) def test_rule_missing(self): rules = policy.get_rules() # eliqiao os_compute_api:os-quota-class-sets:show requires # admin=True or quota_class match, this rule won't belong to # admin_only, non_admin, admin_or_user, empty_rule special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin', 'os_compute_api:os-quota-class-sets:show') result = set(rules.keys()) - set(self.admin_only_rules + self.admin_or_owner_rules + self.non_admin_only_rules + self.allow_all_rules + special_rules) self.assertEqual(set([]), result) nova-13.1.4/nova/tests/unit/test_weights.py0000664000567000056710000000525413064447140022064 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For weights. """ import mock from nova.scheduler import weights as scheduler_weights from nova.scheduler.weights import ram from nova import test from nova.tests.unit.scheduler import fakes from nova import weights class TestWeigher(test.NoDBTestCase): def test_no_multiplier(self): class FakeWeigher(weights.BaseWeigher): def _weigh_object(self, *args, **kwargs): pass self.assertEqual(1.0, FakeWeigher().weight_multiplier()) def test_no_weight_object(self): class FakeWeigher(weights.BaseWeigher): def weight_multiplier(self, *args, **kwargs): pass self.assertRaises(TypeError, FakeWeigher) def test_normalization(self): # weight_list, expected_result, minval, maxval map_ = ( ((), (), None, None), ((0.0, 0.0), (0.0, 0.0), None, None), ((1.0, 1.0), (0.0, 0.0), None, None), ((20.0, 50.0), (0.0, 1.0), None, None), ((20.0, 50.0), (0.0, 0.375), None, 100.0), ((20.0, 50.0), (0.4, 1.0), 0.0, None), ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0), ) for seq, result, minval, maxval in map_: ret = weights.normalize(seq, minval=minval, maxval=maxval) self.assertEqual(tuple(ret), result) @mock.patch('nova.weights.BaseWeigher.weigh_objects') def test_only_one_host(self, mock_weigh): host_values = [ ('host1', 'node1', {'free_ram_mb': 512}), ] hostinfo = [fakes.FakeHostState(host, node, values) for host, node, values in host_values] weight_handler = scheduler_weights.HostWeightHandler() weighers = [ram.RAMWeigher()] weighed_host = weight_handler.get_weighed_objects(weighers, hostinfo, {}) self.assertEqual(1, len(weighed_host)) self.assertEqual('host1', weighed_host[0].obj.host) self.assertFalse(mock_weigh.called) nova-13.1.4/nova/tests/unit/cast_as_call.py0000664000567000056710000000345513064447140021764 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import oslo_messaging as messaging class CastAsCall(fixtures.Fixture): """Make RPC 'cast' behave like a 'call'. This is a little hack for tests that need to know when a cast operation has completed. The idea is that we wait for the RPC endpoint method to complete and return before continuing on the caller. See Ia7f40718533e450f00cd3e7d753ac65755c70588 for more background. 
""" def __init__(self, stubs): super(CastAsCall, self).__init__() self.stubs = stubs @staticmethod def _stub_out(stubs, obj): orig_prepare = obj.prepare def prepare(self, *args, **kwargs): # Casts with fanout=True would throw errors if its monkeypatched to # the call method, so we must override fanout to False if 'fanout' in kwargs: kwargs['fanout'] = False cctxt = orig_prepare(self, *args, **kwargs) CastAsCall._stub_out(stubs, cctxt) # woo, recurse! return cctxt stubs.Set(obj, 'prepare', prepare) stubs.Set(obj, 'cast', obj.call) def setUp(self): super(CastAsCall, self).setUp() self._stub_out(self.stubs, messaging.RPCClient) nova-13.1.4/nova/tests/unit/utils.py0000664000567000056710000001557013064447152020520 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import platform import socket import sys from oslo_config import cfg from six.moves import range from nova.compute import flavors import nova.context import nova.db from nova import exception from nova.image import glance from nova.network import minidns from nova.network import model as network_model from nova import objects import nova.utils CONF = cfg.CONF CONF.import_opt('use_ipv6', 'nova.netconf') def get_test_admin_context(): return nova.context.get_admin_context() def get_test_image_object(context, instance_ref): if not context: context = get_test_admin_context() image_ref = instance_ref['image_ref'] image_service, image_id = glance.get_remote_image_service(context, image_ref) return objects.ImageMeta.from_dict( image_service.show(context, image_id)) def get_test_flavor(context=None, options=None): options = options or {} if not context: context = get_test_admin_context() test_flavor = {'name': 'kinda.big', 'flavorid': 'someid', 'memory_mb': 2048, 'vcpus': 4, 'root_gb': 40, 'ephemeral_gb': 80, 'swap': 1024} test_flavor.update(options) try: flavor_ref = nova.db.flavor_create(context, test_flavor) except (exception.FlavorExists, exception.FlavorIdExists): flavor_ref = nova.db.flavor_get_by_name(context, 'kinda.big') return flavor_ref def get_test_instance(context=None, flavor=None, obj=False): if not context: context = get_test_admin_context() if not flavor: flavor = get_test_flavor(context) test_instance = {'memory_kb': '2048000', 'basepath': '/some/path', 'bridge_name': 'br100', 'vcpus': 4, 'root_gb': 40, 'bridge': 'br101', 'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175', 'instance_type_id': flavor['id'], 'system_metadata': {}, 'extra_specs': {}, 'user_id': context.user_id, 'project_id': context.project_id, } if obj: instance = objects.Instance(context, **test_instance) instance.flavor = objects.Flavor.get_by_id(context, flavor['id']) instance.create() else: flavors.save_flavor_info(test_instance['system_metadata'], flavor, '') instance = nova.db.instance_create(context, test_instance) return instance def get_test_network_info(count=1): ipv6 = CONF.use_ipv6 fake = 'fake' fake_ip = '0.0.0.0' fake_vlan = 100 fake_bridge_interface = 'eth0' def current(): 
subnet_4 = network_model.Subnet(cidr=fake_ip, dns=[network_model.IP(fake_ip), network_model.IP(fake_ip)], gateway=network_model.IP(fake_ip), ips=[network_model.IP(fake_ip), network_model.IP(fake_ip)], routes=None, dhcp_server=fake_ip) subnet_6 = network_model.Subnet(cidr=fake_ip, gateway=network_model.IP(fake_ip), ips=[network_model.IP(fake_ip), network_model.IP(fake_ip), network_model.IP(fake_ip)], routes=None, version=6) subnets = [subnet_4] if ipv6: subnets.append(subnet_6) network = network_model.Network(id=None, bridge=fake, label=None, subnets=subnets, vlan=fake_vlan, bridge_interface=fake_bridge_interface, injected=False) vif = network_model.VIF(id='vif-xxx-yyy-zzz', address=fake, network=network, type=network_model.VIF_TYPE_BRIDGE, devname=None, ovs_interfaceid=None) return vif return network_model.NetworkInfo([current() for x in range(0, count)]) def is_osx(): return platform.mac_ver()[0] != '' def is_linux(): return platform.system() == 'Linux' def coreutils_readlink_available(): _out, err = nova.utils.trycmd('readlink', '-nm', '/') return err == '' test_dns_managers = [] def dns_manager(): global test_dns_managers manager = minidns.MiniDNS() test_dns_managers.append(manager) return manager def cleanup_dns_managers(): global test_dns_managers for manager in test_dns_managers: manager.delete_dns_file() test_dns_managers = [] def killer_xml_body(): return ((""" ]> %(d)s """) % { 'a': 'A' * 10, 'b': '&a;' * 10, 'c': '&b;' * 10, 'd': '&c;' * 9999, }).strip() def is_ipv6_supported(): has_ipv6_support = socket.has_ipv6 try: s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) s.close() except socket.error as e: if e.errno == errno.EAFNOSUPPORT: has_ipv6_support = False else: raise # check if there is at least one interface with ipv6 if has_ipv6_support and sys.platform.startswith('linux'): try: with open('/proc/net/if_inet6') as f: if not f.read(): has_ipv6_support = False except IOError: has_ipv6_support = False return has_ipv6_support def get_api_version(request): if request.path[2:3].isdigit(): return int(request.path[2:3]) nova-13.1.4/nova/tests/unit/test_ipv6.py0000664000567000056710000000657413064447152021307 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
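# NOTE (editorial comment, not in the original source): the expected values
# in the rfc2462 tests below follow the modified EUI-64 scheme. The MAC
# 02:16:3e:33:44:55 is split in half, ff:fe is inserted in the middle
# (02:16:3e:ff:fe:33:44:55) and the universal/local bit of the first octet
# is flipped (02 -> 00), giving the interface id 0016:3eff:fe33:4455; with
# the prefix 2001:db8:: that is written 2001:db8::16:3eff:fe33:4455, which is
# exactly what test_to_global asserts, and to_mac() reverses the same
# transformation. The account_identifier backend differs: its expected
# address (2001:db8::a94a:8fe5:ff33:4455) keeps the trailing MAC octets but
# replaces the leading half of the interface id.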
"""Test suite for IPv6.""" from nova import ipv6 from nova import test class IPv6RFC2462TestCase(test.NoDBTestCase): """Unit tests for IPv6 rfc2462 backend operations.""" def setUp(self): super(IPv6RFC2462TestCase, self).setUp() self.flags(ipv6_backend='rfc2462') ipv6.reset_backend() def test_to_global(self): addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test') self.assertEqual(addr, '2001:db8::16:3eff:fe33:4455') def test_to_mac(self): mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455') self.assertEqual(mac, '00:16:3e:33:44:55') def test_to_global_with_bad_mac(self): bad_mac = '02:16:3e:33:44:5Z' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', bad_mac, 'test') def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') def test_to_global_with_bad_project(self): bad_project = 'non-existent-project-name' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', '2001:db8::a94a:8fe5:ff33:4455', bad_project) class IPv6AccountIdentiferTestCase(test.NoDBTestCase): """Unit tests for IPv6 account_identifier backend operations.""" def setUp(self): super(IPv6AccountIdentiferTestCase, self).setUp() self.flags(ipv6_backend='account_identifier') ipv6.reset_backend() def test_to_global(self): addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test') self.assertEqual(addr, '2001:db8::a94a:8fe5:ff33:4455') def test_to_mac(self): mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455') self.assertEqual(mac, '02:16:3e:33:44:55') def test_to_global_with_bad_mac(self): bad_mac = '02:16:3e:33:44:5X' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', bad_mac, 'test') def test_to_global_with_bad_prefix(self): bad_prefix = '78' self.assertRaises(TypeError, ipv6.to_global, bad_prefix, '2001:db8::a94a:8fe5:ff33:4455', 'test') def test_to_global_with_bad_project(self): bad_project = 'non-existent-project-name' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', '2001:db8::a94a:8fe5:ff33:4455', bad_project) nova-13.1.4/nova/tests/unit/ssl_cert/0000775000567000056710000000000013064447471020620 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/ssl_cert/privatekey.key0000664000567000056710000000625313064447140023514 0ustar jenkinsjenkins00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA0Em2/KEwoGff3uETvH0J783L7gQ1B+v1Wse68Y0JHdb+5RtB p9011kJg0AY+urZPcIRgrQgnFmOvYe5NxIr1K0EwVq5N4g+pVRWQ7dOOrVGS/4ue n97oOEeSMJToSr+MjDeJzRCjEvKBPJsbFeAmBWgHMawm5/M5DXi1+TMZ4bITcvnP kOkI1CpYtYXuygzHeQzmEGb4tZ6iIE+OawRdBj5RiIOVItb/ojznlf/DANbn460/ r/WVZR9zIDIiqPe3SmbFl5jvFCdptaOFdPZ1Mr52XBGNvvvDrrkCB4XVHmWlO9TJ PVpDCAfWR30Z5BuQEPBZJUsf1ug8+B0wu1faKlSBAAASEg5RZc5hlUdU1eFXoKuM CC990Xf1z2Ga5ei88/biKRbgv53lE8TtWr0onhTOKPD0aG7iVBAJFnxII1khgmdP Pnf7p+z3dbCecQ+2j20VEp41a3ePH/COW4MWGEFNk7iyc0ir3GcJhIRAAixEFVYC G/supYuKBTLtr0cwjKLe1ARk300IPLgE24kO7fddYstUzA2siJHmZ0JQWhEq38T5 g+Pts2XNkxNIYugEU8zvUud/Kb0drMcMTUrG9PiDNgy5srhjTuZ7JuIvhA29TTwh OAVToMAahv7L8YerD5uZ7PnLQX/oWXJX5KJKttUQS3On0TDKvFvzk4kUrS0CAwEA AQKCAgAkdpMrPMi3fBfL+9kpqTYhHgTyYRgrj9o/DzIh8U/EQowS7aebzHUNUkeC g2Vd6GaVywblo8S7/a2JVl+U5cKv1NSyiAcoaRd6xrC9gci7fMlgJUAauroqiBUG njrgQxJGxb5BAQWbXorTYk/mj3v4fFKuFnYlKwY03on020ZPpY4UFbmJo9Ig2lz3 QkAgbQZKocBw5KXrnZ7CS0siXvwuCKDbZjWoiLzt2P2t2712myizSfQZSMPjlRLh cwVwURVsV/uFY4ePHqs52iuV40N3I7KywXvwEEEciFTbnklF7gN0Kvcj33ZWpJCV qUfsEAsze/APQEyNodBymyGZ2nJdn9PqaQYnVhE9xpjiXejQHZsuMnrA3jYr8Mtx j0EZiX4ICI4Njt9oI/EtWhQtcDt86hTEtBlyFRU6jhW8O5Ai7hzxCYgUJ7onWVOE 
PtCC9FoOwumXWgdZNz/hMqQSn91O8trferccdUGIfx8N/G4QkyzOLI0Hc6Mubby7 +GGRwVXnLsIGxpFc+VBHY/J6offCkXx3MPbfn57x0LGZu1GtHoep391yLUrBs9jx nJrUI9OuwaeOG0iesTuGT+PbZWxDrJEtA7DRM1FBMNMvn5BTTg7yx8EqUM35hnFf 5J1XEf0DW5nUPH1Qadgi1LZjCAhiD5OuNooFsTmN7dSdleF+PQKCAQEA7jq7drTu O1ePCO+dQeECauy1qv9SO2LIHfLZ/L4OwcEtEnE8xBbvrZfUqkbUITCS6rR8UITp 6ru0MyhUEsRsk4FHIJV2P1pB2Zy+8tV4Dm3aHh4bCoECqAPHMgXUkP+9kIOn2QsE uRXnsEiQAl0SxSTcduy5F+WIWLVl4A72ry3cSvrEGwMEz0sjaEMmCZ2B8X8EJt64 uWUSHDaAMSg80bADy3p+OhmWMGZTDl/KRCz9pJLyICMxsotfbvE0BadAZr+UowSe ldqKlgRYlYL3pAhwjeMO/QxmMfRxjvG09romqe0Bcs8BDNII/ShAjjHQUwxcEszQ P14g8QwmTQVm5wKCAQEA39M3GveyIhX6vmyR4DUlxE5+yloTACdlCZu6wvFlRka8 3FEw8DWKVfnmYYFt/RPukYeBRmXwqLciGSly7PnaBXeNFqNXiykKETzS2UISZoqT Dur06GmcI+Lk1my9v5gLB1LT/D8XWjwmjA5hNO1J1UYmp+X4dgaYxWzOKBsTTJ8j SVaEaxBUwLHy58ehoQm+G5+QqL5yU/n1hPwXx1XYvd33OscSGQRbALrH2ZxsqxMZ yvNa2NYt3TnihXcF36Df5861DTNI7NDqpY72C4U8RwaqgTdDkD+t8zrk/r3LUa5d NGkGQF+59spBcb64IPZ4DuJ9//GaEsyj0jPF/FTMywKCAQEA1DiB83eumjKf+yfq AVv/GV2RYKleigSvnO5QfrSY1MXP7xPtPAnqrcwJ6T57jq2E04zBCcG92BwqpUAR 1T4iMy0BPeenlTxEWSUnfY/pCYGWwymykSLoSOBEvS0wdZM9PdXq2pDUPkVjRkj9 8P0U0YbK1y5+nOkfE1dVT8pEuz2xdyH5PM7to/SdsC3RXtNvhMDP5AiYqp99CKEM hb4AoBOa7dNLS1qrzqX4618uApnJwqgdBcAUb6d09pHs8/RQjLeyI57j3z72Ijnw 6A/pp7jU+7EAEzDOgUXvO5Xazch61PmLRsldeBxLYapQB9wcZz8lbqICCdFCqzlV jVt4lQKCAQA9CYxtfj7FrNjENTdSvSufbQiGhinIUPXsuNslbk7/6yp1qm5+Exu2 dn+s927XJShZ52oJmKMYX1idJACDP1+FPiTrl3+4I2jranrVZH9AF2ojF0/SUXqT Drz4/I6CQSRAywWkNFBZ+y1H5GP92vfXgVnpT32CMipXLGTL6xZIPt2QkldqGvoB 0oU7T+Vz1QRS5CC+47Cp1fBuY5DYe0CwBmf1T3RP/jAS8tytK0s3G+5cuiB8IWxA eBid7OddJLHqtSQKhYHNkutqWqIeYicd92Nn+XojTDpTqivojDl1/ObN9BYQWAqO knlmW2w7EPuMk5doxKoPll7WY+gJ99YhAoIBAHf5HYRh4ZuYkx+R1ow8/Ahp7N4u BGFRNnCpMG358Zws95wvBg5dkW8VU0M3256M0kFkw2AOyyyNsHqIhMNakzHesGo/ TWhqCh23p1xBLY5p14K8K6iOc1Jfa1LqGsL2TZ06TeNNyONMGqq0yOyD62CdLRDj 0ACL/z2j494LmfqhV45hYuqjQbrLizjrr6ln75g2WJ32U+zwl7KUHnBL7IEwb4Be KOl1bfVwZAs0GtHuaiScBYRLUaSC/Qq7YPjTh1nmg48DQC/HUCNGMqhoZ950kp9k 76HX+MpwUi5y49moFmn/3qDvefGFpX1td8vYMokx+eyKTXGFtxBUwPnMUSQ= -----END RSA PRIVATE KEY----- nova-13.1.4/nova/tests/unit/ssl_cert/ca.key0000664000567000056710000000625313064447140021714 0ustar jenkinsjenkins00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJJwIBAAKCAgEAwILIMebpHYK1E1zhyi6713GGTQ9DFeLOE1T25+XTJqAkO7ef QzZfB8QwCXy/8bmbhmKgQQ7APuuDci8SKCkYeWCxqJRGmg0tZVlj5gCfrV2u+olw S+XyaOGCFkYScs6D34BaE2rGD2GDryoSPc2feAt6X4+ZkDPZnvaHQP6j9Ofq/4Wm sECEas0IO5X8SDF8afA47U9ZXFkcgQK6HCHDcokLaaZxEyZFSaPex6ZAESNthkGO xEThRPxAkJhqYCeMl3Hff98XEUcFNzuAOmcnQJJgRemwJO2hS5KS3Y3p9/nBRlh3 tSAG1nbY5kXSpyaq296D9x/esnXlt+9JUmn1rKyvmaFBC/SbzyyQoO3MT5r8rKte 0bulLw1bZOZNlhxSv2KCg5RD6vlNrnpsZszw4nj28fBroeFp0JMeT8jcqGs3qdm8 sXLcBgiTalLYtiCNV9wZjOduQotuFN6mDwZvfa6hzZjcBNfqeLyTEnFb5k6pIla0 wydWx/jvBAzoxOkEcVjak747A+p/rriD5hVUBH0BuNaWcEgKe9jcHnLvU8hUxFtg PxUHOOR+eMa+FS3ApKf9sJ/zVUq0uxyA9hUnsvnqv/CywLSvaNKBiKQTL0QLEXnw 6EQb7g/XuwC5mmt+l30wGh9M1U/QMaU/+YzT4sVLTXIHJ7ExRTbEecbNbjsCAwEA AQKCAgA0ySd/l2NANkDUaFl5CMt0zaoXoyGv9Jqw7lEtUPVO2AZXYYgH8/amuIK7 dztiWpRsisqKTDMmjYljW8jMvkf5sCvGn7GkOAzEh3g+7tjZvqBmDh1+kjSf0YXL +bbBSCMcu6L3RAW+3ewvsYeC7sjVL8CER2nCApWfYtW/WpM2agkju0/zcB1e841Y WU3ttbP5kGbrmyBTlBOexFKnuBJRa4Z3l63VpF7HTGmfsNRMXrx/XaZ55rEmK0zA 2SoB55ZDSHQSKee3UxP5CxWj7fjzWa+QO/2Sgp4BjNU8btdCqXb3hPZ98aQuVjQv H+Ic9xtOYnso3dJAeNdeUfx23psAHhUqYruD+xrjwTJV5viGO05AHjp/i4dKjOaD CMFKP/AGUcGAsL/Mjq5oMbWovbqhGaaOw4I0Xl/JuB0XQXWwr5D2cLUjMaCS9bLq WV8lfEitoCVihAi21s8MIyQWHvl4m4d/aD5KNh0MJYo3vYCrs6A256dhbmlEmGBr DY1++4yxz4YkY07jYbQYkDlCtwu51g+YE8lKAE9+Mz+PDgbRB7dgw7K3Q9SsXp1P 
ui7/vnrgqppnYm4aaHvXEZ1qwwt2hpoumhQo/k1xrSzVKQ83vjzjXoDc9o84Vsv2 dmcLGKPpu+cm2ks8q6x2EI09dfkJjb/7N9SpU0AOjU7CgDye0QKCAQEA5/mosLuC vXwh5FkJuV/wpipwqkS4vu+KNQiN83wdz+Yxw6siAz6/SIjr0sRmROop6CNCaBNq 887+mgm62rEe5eU4vHRlBOlYQD0qa+il09uwYPU0JunSOabxUCBhSuW/LXZyq7rA ywGB7OVSTWwgb6Y0X1pUcOXK5qYaWJUdUEi2oVrU160phbDAcZNH+vAyl+IRJmVJ LP7f1QwVrnIvIBgpIvPLRigagn84ecXPITClq4KjGNy2Qq/iarEwY7llFG10xHmK xbzQ8v5XfPZ4Swmp+35kwNhfp6HRVWV3RftX4ftFArcFGYEIActItIz10rbLJ+42 fc8oZKq/MB9NlwKCAQEA1HLOuODXrFsKtLaQQzupPLpdyfYWR7A6tbghH5paKkIg A+BSO/b91xOVx0jN2lkxe0Ns1QCpHZU8BXZ9MFCaZgr75z0+vhIRjrMTXXirlray 1mptar018j79sDJLFBF8VQFfi7Edd3OwB2dbdDFJhzNUbNJIVkVo+bXYfuWGlotG EVWxX/CnPgnKknl6vX/8YSg6qJCwcUTmQRoqermd02VtrMrGgytcOG6QdKYTT/ct b3zDNXdeLOJKyLZS1eW4V2Pcl4Njbaxq/U7KYkjWWZzVVsiCjWA8H0RXGf+Uk9Gu cUg5hm5zxXcOGdI6yRVxHEU7CKc25Ks5xw4xPkhA/QKCAQBd7yC6ABQe+qcWul9P q2PdRY49xHozBvimJQKmN/oyd3prS18IhV4b1yX3QQRQn6m8kJqRXluOwqEiaxI5 AEQMv9dLqK5HYN4VlS8aZyjPM0Sm3mPx5fj0038f/RyooYPauv4QQB1VlxSvguTi 6QfxbhIDEqbi2Ipi/5vnhupJ2kfp6sgJVdtcgYhL9WHOYXl7O1XKgHUzPToSIUSe USp4CpCN0L7dd9vUQAP0e382Z2aOnuXAaY98TZCXt4xqtWYS8Ye5D6Z8D8tkuk1f Esb/S7iDWFkgJf4F+Wa099NmiTK7FW6KfOYZv8AoSdL1GadpXg/B6ZozM7Gdoe6t Y9+dAoIBABH2Rv4gnHuJEwWmbdoRYESvKSDbOpUDFGOq1roaTcdG4fgR7kH9pwaZ NE+uGyF76xAV6ky0CphitrlrhDgiiHtaMGQjrHtbgbqD7342pqNOfR5dzzR4HOiH ZOGRzwE6XT2+qPphljE0SczGc1gGlsXklB3DRbRtl+uM8WoBM/jke58ZlK6c5Tb8 kvEBblw5Rvhb82GvIgvhnGoisTbBHNPzvmseldwfPWPUDUifhgB70I6diM+rcP3w gAwqRiSpkIVq/wqcZDqwmjcigz/+EolvFiaJO2iCm3K1T3v2PPSmhM41Ig/4pLcs UrfiK3A27OJMBCq+IIkC5RasX4N5jm0CggEAXT9oyIO+a7ggpfijuba0xuhFwf+r NY49hx3YshWXX5T3LfKZpTh+A1vjGcj57MZacRcTkFQgHVcyu+haA9lI4vsFMesU 9GqenrJNvxsV4i3avIxGjjx7d0Ok/7UuawTDuRea8m13se/oJOl5ftQK+ZoVqtO8 SzeNNpakiuCxmIEqaD8HUwWvgfA6n0HPJNc0vFAqu6Y5oOr8GDHd5JoKA8Sb15N9 AdFqwCbW9SqUVsvHDuiOKXy8lCr3OiuyjgBfbIyuuWbaU0PqIiKW++lTluXkl7Uz vUawgfgX85sY6A35g1O/ydEQw2+h2tzDvQdhhyTYpMZjZwzIIPjCQMgHPA== -----END RSA PRIVATE KEY----- nova-13.1.4/nova/tests/unit/ssl_cert/certificate.crt0000664000567000056710000000474113064447140023613 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIHHjCCBQagAwIBAgIBATANBgkqhkiG9w0BAQ0FADCBsDELMAkGA1UEBhMCVVMx DjAMBgNVBAgTBVRleGFzMQ8wDQYDVQQHEwZBdXN0aW4xHTAbBgNVBAoTFE9wZW5T dGFjayBGb3VuZGF0aW9uMR0wGwYDVQQLExRPcGVuU3RhY2sgRGV2ZWxvcGVyczEQ MA4GA1UEAxMHVGVzdCBDQTEwMC4GCSqGSIb3DQEJARYhb3BlbnN0YWNrLWRldkBs aXN0cy5vcGVuc3RhY2sub3JnMB4XDTE1MDEwODAyNTQzNVoXDTI1MDEwODAyNTQz NVoweDELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMQ8wDQYDVQQHEwZBdXN0 aW4xHTAbBgNVBAoTFE9wZW5TdGFjayBGb3VuZGF0aW9uMR0wGwYDVQQLExRPcGVu U3RhY2sgRGV2ZWxvcGVyczEKMAgGA1UEAxQBKjCCAiIwDQYJKoZIhvcNAQEBBQAD ggIPADCCAgoCggIBANBJtvyhMKBn397hE7x9Ce/Ny+4ENQfr9VrHuvGNCR3W/uUb QafdNdZCYNAGPrq2T3CEYK0IJxZjr2HuTcSK9StBMFauTeIPqVUVkO3Tjq1Rkv+L np/e6DhHkjCU6Eq/jIw3ic0QoxLygTybGxXgJgVoBzGsJufzOQ14tfkzGeGyE3L5 z5DpCNQqWLWF7soMx3kM5hBm+LWeoiBPjmsEXQY+UYiDlSLW/6I855X/wwDW5+Ot P6/1lWUfcyAyIqj3t0pmxZeY7xQnabWjhXT2dTK+dlwRjb77w665AgeF1R5lpTvU yT1aQwgH1kd9GeQbkBDwWSVLH9boPPgdMLtX2ipUgQAAEhIOUWXOYZVHVNXhV6Cr jAgvfdF39c9hmuXovPP24ikW4L+d5RPE7Vq9KJ4Uzijw9Ghu4lQQCRZ8SCNZIYJn Tz53+6fs93WwnnEPto9tFRKeNWt3jx/wjluDFhhBTZO4snNIq9xnCYSEQAIsRBVW Ahv7LqWLigUy7a9HMIyi3tQEZN9NCDy4BNuJDu33XWLLVMwNrIiR5mdCUFoRKt/E +YPj7bNlzZMTSGLoBFPM71Lnfym9HazHDE1KxvT4gzYMubK4Y07meybiL4QNvU08 ITgFU6DAGob+y/GHqw+bmez5y0F/6FlyV+SiSrbVEEtzp9Ewyrxb85OJFK0tAgMB AAGjggF4MIIBdDBLBgNVHREERDBCgglsb2NhbGhvc3SCDWlwNi1sb2NhbGhvc3SC CTEyNy4wLjAuMYIDOjoxhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMB0GA1UdDgQW BBSjWxD0qedj9eeGUWyGphy5PU67dDCB5QYDVR0jBIHdMIHagBQTWz2WEB0sJg9c 
xfM5JeJMIAJq0qGBtqSBszCBsDELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFz MQ8wDQYDVQQHEwZBdXN0aW4xHTAbBgNVBAoTFE9wZW5TdGFjayBGb3VuZGF0aW9u MR0wGwYDVQQLExRPcGVuU3RhY2sgRGV2ZWxvcGVyczEQMA4GA1UEAxMHVGVzdCBD QTEwMC4GCSqGSIb3DQEJARYhb3BlbnN0YWNrLWRldkBsaXN0cy5vcGVuc3RhY2su b3JnggkA6M8Ysv1UOGMwCQYDVR0TBAIwADATBgNVHSUEDDAKBggrBgEFBQcDATAN BgkqhkiG9w0BAQ0FAAOCAgEAIGx/acXQEiGYFBJUduE6/Y6LBuHEVMcj0yfbLzja Eb35xKWHuX7tgQPwXy6UGlYM8oKIptIp/9eEuYXte6u5ncvD7e/JldCUVd0fW8hm fBOhfqVstcTmlfZ6WqTJD6Bp/FjUH+8qf8E+lsjNy7i0EsmcQOeQm4mkocHG1AA4 MEeuDg33lV6XCjW450BoZ/FTfwZSuTlGgFlEzUUrAe/ETdajF9G9aJ+0OvXzE1tU pvbvkU8eg4pLXxrzboOhyQMEmCikdkMYjo/0ZQrXrrJ1W8mCinkJdz6CToc7nUkU F8tdAY0rKMEM8SYHngMJU2943lpGbQhE5B4oms8I+SMTyCVz2Vu5I43Px68Y0GUN Bn5qu0w2Vj8eradoPF8pEAIVICIvlbiRepPbNZ7FieSsY2TEfLtxBd2DLE1YWeE5 p/RDBxqcDrGQuSg6gFSoLEhYgQcGnYgD75EIE8f/LrHFOAeSYEOhibFbK5G8p/2h EHcKZ9lvTgqwHn0FiTqZ3LWxVFsZiTsiyXErpJ2Nu2WTzo0k1xJMUpJqHuUZraei N5fA5YuDp2ShXRoZyVieRvp0TCmm6sHL8Pn0K8weJchYrvV1yvPKeuISN/fVCQev 88yih5Rh5R2szwoY3uVImpd99bMm0e1bXrQug43ZUz9rC4ABN6+lZvuorDWRVI7U I1M= -----END CERTIFICATE----- nova-13.1.4/nova/tests/unit/ssl_cert/ca.crt0000664000567000056710000000467013064447140021715 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIHADCCBOigAwIBAgIJAOjPGLL9VDhjMA0GCSqGSIb3DQEBDQUAMIGwMQswCQYD VQQGEwJVUzEOMAwGA1UECBMFVGV4YXMxDzANBgNVBAcTBkF1c3RpbjEdMBsGA1UE ChMUT3BlblN0YWNrIEZvdW5kYXRpb24xHTAbBgNVBAsTFE9wZW5TdGFjayBEZXZl bG9wZXJzMRAwDgYDVQQDEwdUZXN0IENBMTAwLgYJKoZIhvcNAQkBFiFvcGVuc3Rh Y2stZGV2QGxpc3RzLm9wZW5zdGFjay5vcmcwHhcNMTUwMTA4MDIyOTEzWhcNMjUw MTA4MDIyOTEzWjCBsDELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMQ8wDQYD VQQHEwZBdXN0aW4xHTAbBgNVBAoTFE9wZW5TdGFjayBGb3VuZGF0aW9uMR0wGwYD VQQLExRPcGVuU3RhY2sgRGV2ZWxvcGVyczEQMA4GA1UEAxMHVGVzdCBDQTEwMC4G CSqGSIb3DQEJARYhb3BlbnN0YWNrLWRldkBsaXN0cy5vcGVuc3RhY2sub3JnMIIC IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAwILIMebpHYK1E1zhyi6713GG TQ9DFeLOE1T25+XTJqAkO7efQzZfB8QwCXy/8bmbhmKgQQ7APuuDci8SKCkYeWCx qJRGmg0tZVlj5gCfrV2u+olwS+XyaOGCFkYScs6D34BaE2rGD2GDryoSPc2feAt6 X4+ZkDPZnvaHQP6j9Ofq/4WmsECEas0IO5X8SDF8afA47U9ZXFkcgQK6HCHDcokL aaZxEyZFSaPex6ZAESNthkGOxEThRPxAkJhqYCeMl3Hff98XEUcFNzuAOmcnQJJg RemwJO2hS5KS3Y3p9/nBRlh3tSAG1nbY5kXSpyaq296D9x/esnXlt+9JUmn1rKyv maFBC/SbzyyQoO3MT5r8rKte0bulLw1bZOZNlhxSv2KCg5RD6vlNrnpsZszw4nj2 8fBroeFp0JMeT8jcqGs3qdm8sXLcBgiTalLYtiCNV9wZjOduQotuFN6mDwZvfa6h zZjcBNfqeLyTEnFb5k6pIla0wydWx/jvBAzoxOkEcVjak747A+p/rriD5hVUBH0B uNaWcEgKe9jcHnLvU8hUxFtgPxUHOOR+eMa+FS3ApKf9sJ/zVUq0uxyA9hUnsvnq v/CywLSvaNKBiKQTL0QLEXnw6EQb7g/XuwC5mmt+l30wGh9M1U/QMaU/+YzT4sVL TXIHJ7ExRTbEecbNbjsCAwEAAaOCARkwggEVMB0GA1UdDgQWBBQTWz2WEB0sJg9c xfM5JeJMIAJq0jCB5QYDVR0jBIHdMIHagBQTWz2WEB0sJg9cxfM5JeJMIAJq0qGB tqSBszCBsDELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMQ8wDQYDVQQHEwZB dXN0aW4xHTAbBgNVBAoTFE9wZW5TdGFjayBGb3VuZGF0aW9uMR0wGwYDVQQLExRP cGVuU3RhY2sgRGV2ZWxvcGVyczEQMA4GA1UEAxMHVGVzdCBDQTEwMC4GCSqGSIb3 DQEJARYhb3BlbnN0YWNrLWRldkBsaXN0cy5vcGVuc3RhY2sub3JnggkA6M8Ysv1U OGMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQ0FAAOCAgEAIfAD6uVorT5WomG1 2DWRm3kuwa+EDimgVF6VRvxCzyHx7e/6KJQj149KpMQ6e0ZPjqQw+pZ+jJSgq6TP MEjCHgIDwdKhi9LmQWIlo8xdzgfZW2VQkVLvwkqAnWWhCy9oGc/Ypk8pjiZfCx+/ DSJBbFnopI9f8epAKMq7N3jJyEMoTctzmI0KckrZnJ1Gq4MZpoxGmkJiGhWoUk8p r8apXZ6B1DzO1XxpGw2BIcrUC3bQS/vPrg5/XbyaAu2BSgu6iF7ULqkBsEd0yK/L i2gO9eTacaX3zJBQOlMJFsIAgIiVw6Rq6BuhU9zxDoopY4feta/NDOpk1OjY3MV7 4rcLTU6XYaItMDRe+dmjBOK+xspsaCU4kHEkA7mHL5YZhEEWLHj6QY8tAiIQMVQZ RuTpQIbNkjLW8Ls+CbwL2LkUFB19rKu9tFpzEJ1IIeFmt5HZsL5ri6W2qkSPIbIe Qq15kl/a45jgBbgn2VNA5ecjW20hhXyaS9AKWXK+AeFBaFIFDUrB2UP4YSDbJWUJ 
0LKe+QuumXdl+iRdkgb1Tll7qme8gXAeyzVGHK2AsaBg+gkEeSyVLRKIixceyy+3 6yqlKJhk2qeV3ceOfVm9ZdvRlzWyVctaTcGIpDFqf4y8YyVhL1e2KGKcmYtbLq+m rtku4CM3HldxcM4wqSB1VcaTX8o= -----END CERTIFICATE----- nova-13.1.4/nova/tests/unit/__init__.py0000664000567000056710000000266113064447140021111 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nova.tests.unit` -- Nova Unittests ===================================================== .. automodule:: nova.tests.unit :platform: Unix """ import eventlet from nova import objects eventlet.monkey_patch(os=False) # NOTE(alaski): Make sure this is done after eventlet monkey patching otherwise # the threading.local() store used in oslo_messaging will be initialized to # threadlocal storage rather than greenthread local. This will cause context # sets and deletes in that storage to clobber each other. # NOTE(comstud): Make sure we have all of the objects loaded. We do this # at module import time, because we may be using mock decorators in our # tests that run at import time. objects.register_all() nova-13.1.4/nova/tests/unit/fake_request_spec.py0000664000567000056710000000627513064447152023052 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from oslo_utils import uuidutils from nova import context from nova import objects from nova.tests.unit import fake_flavor INSTANCE_NUMA_TOPOLOGY = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=512), objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=512)]) INSTANCE_NUMA_TOPOLOGY.obj_reset_changes(recursive=True) IMAGE_META = objects.ImageMeta.from_dict( {'status': 'active', 'container_format': 'bare', 'min_ram': 0, 'updated_at': '2014-12-12T11:16:36.000000', 'min_disk': '0', 'owner': '2d8b9502858c406ebee60f0849486222', 'protected': 'yes', 'properties': { 'os_type': 'Linux', 'hw_video_model': 'vga', 'hw_video_ram': '512', 'hw_qemu_guest_agent': 'yes', 'hw_scsi_model': 'virtio-scsi', }, 'size': 213581824, 'name': 'f16-x86_64-openstack-sda', 'checksum': '755122332caeb9f661d5c978adb8b45f', 'created_at': '2014-12-10T16:23:14.000000', 'disk_format': 'qcow2', 'id': 'c8b1790e-a07d-4971-b137-44f2432936cd', } ) IMAGE_META.obj_reset_changes(recursive=True) PCI_REQUESTS = objects.InstancePCIRequests( requests=[objects.InstancePCIRequest(count=1), objects.InstancePCIRequest(count=2)]) PCI_REQUESTS.obj_reset_changes(recursive=True) def fake_db_spec(): req_obj = fake_spec_obj() db_request_spec = { 'id': 1, 'instance_uuid': req_obj.instance_uuid, 'spec': jsonutils.dumps(req_obj.obj_to_primitive()), } return db_request_spec def fake_spec_obj(remove_id=False): ctxt = context.RequestContext('fake', 'fake') req_obj = objects.RequestSpec(ctxt) if not remove_id: req_obj.id = 42 req_obj.instance_uuid = uuidutils.generate_uuid() req_obj.image = IMAGE_META req_obj.numa_topology = INSTANCE_NUMA_TOPOLOGY req_obj.pci_requests = PCI_REQUESTS req_obj.flavor = fake_flavor.fake_flavor_obj(ctxt) req_obj.retry = objects.SchedulerRetries() req_obj.limits = objects.SchedulerLimits() req_obj.instance_group = objects.InstanceGroup() req_obj.project_id = 'fake' req_obj.num_instances = 1 req_obj.availability_zone = None req_obj.ignore_hosts = ['host2', 'host4'] req_obj.force_hosts = ['host1', 'host3'] req_obj.force_nodes = ['node1', 'node2'] req_obj.scheduler_hints = {'hint': ['over-there']} # This should never be a changed field req_obj.obj_reset_changes(['id']) return req_obj nova-13.1.4/nova/tests/unit/consoleauth/0000775000567000056710000000000013064447471021326 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/consoleauth/test_consoleauth.py0000664000567000056710000002164613064447152025270 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Consoleauth Code. 
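The cases below exercise the token lifecycle end to end: authorize_console()
validates the console port over RPC (stubbed out in these tests) and caches
the token, while check_token() stops returning a result once the configured
console_token_ttl has elapsed. In outline, the expiry tests drive that with
the TimeOverride fixture::

    self.flags(console_token_ttl=1)
    self.manager_api.authorize_console(self.context, token, ...)
    timeutils.advance_time_seconds(1)
    self.assertIsNone(self.manager_api.check_token(self.context, token))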
""" import mock from mox3 import mox from oslo_utils import timeutils from nova.consoleauth import manager from nova import context from nova import test class ConsoleauthTestCase(test.NoDBTestCase): """Test Case for consoleauth.""" def setUp(self): super(ConsoleauthTestCase, self).setUp() self.manager_api = self.manager = manager.ConsoleAuthManager() self.context = context.get_admin_context() self.instance_uuid = '00000000-0000-0000-0000-000000000000' def test_reset(self): with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc: old_rpcapi = self.manager_api.compute_rpcapi self.manager_api.reset() mock_rpc.assert_called_once_with() self.assertNotEqual(old_rpcapi, self.manager_api.compute_rpcapi) @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_tokens_expire(self, mock_get): mock_get.return_value = None # Test that tokens expire correctly. self.useFixture(test.TimeOverride()) token = u'mytok' self.flags(console_token_ttl=1) self._stub_validate_console_port(True) self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) self.assertIsNotNone(self.manager_api.check_token(self.context, token)) timeutils.advance_time_seconds(1) self.assertIsNone(self.manager_api.check_token(self.context, token)) def _stub_validate_console_port(self, result): def fake_validate_console_port(ctxt, instance, port, console_type): return result self.stubs.Set(self.manager.compute_rpcapi, 'validate_console_port', fake_validate_console_port) @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_multiple_tokens_for_instance(self, mock_get): mock_get.return_value = None tokens = [u"token" + str(i) for i in range(10)] self._stub_validate_console_port(True) for token in tokens: self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) for token in tokens: self.assertIsNotNone( self.manager_api.check_token(self.context, token)) def test_delete_tokens_for_instance(self): tokens = [u"token" + str(i) for i in range(10)] for token in tokens: self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) self.manager_api.delete_tokens_for_instance(self.context, self.instance_uuid) stored_tokens = self.manager._get_tokens_for_instance( self.instance_uuid) self.assertEqual(len(stored_tokens), 0) for token in tokens: self.assertIsNone( self.manager_api.check_token(self.context, token)) @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_wrong_token_has_port(self, mock_get): mock_get.return_value = None token = u'mytok' self._stub_validate_console_port(False) self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', instance_uuid=self.instance_uuid) self.assertIsNone(self.manager_api.check_token(self.context, token)) def test_delete_expired_tokens(self): self.useFixture(test.TimeOverride()) token = u'mytok' self.flags(console_token_ttl=1) self._stub_validate_console_port(True) self.manager_api.authorize_console(self.context, token, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) timeutils.advance_time_seconds(1) self.assertIsNone(self.manager_api.check_token(self.context, token)) token1 = u'mytok2' self.manager_api.authorize_console(self.context, token1, 'novnc', '127.0.0.1', '8080', 'host', self.instance_uuid) stored_tokens = self.manager._get_tokens_for_instance( self.instance_uuid) # when trying to store token1, expired token is removed fist. 
self.assertEqual(len(stored_tokens), 1) self.assertEqual(stored_tokens[0], token1) class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase): def setUp(self): super(ControlauthMemcacheEncodingTestCase, self).setUp() self.manager = manager.ConsoleAuthManager() self.context = context.get_admin_context() self.u_token = u"token" self.u_instance = u"instance" def test_authorize_console_encoding(self): with test.nested( mock.patch.object(self.manager.mc_instance, 'set', return_value=True), mock.patch.object(self.manager.mc_instance, 'get', return_value='["token"]'), mock.patch.object(self.manager.mc, 'set', return_value=True), mock.patch.object(self.manager.mc, 'get', return_value=None), mock.patch.object(self.manager.mc, 'get_multi', return_value=["token1"]), ) as ( mock_instance_set, mock_instance_get, mock_set, mock_get, mock_get_multi): self.manager.authorize_console(self.context, self.u_token, 'novnc', '127.0.0.1', '8080', 'host', self.u_instance) mock_set.assert_has_calls([mock.call('token', mock.ANY)]) mock_instance_get.assert_has_calls([mock.call('instance')]) mock_get_multi.assert_has_calls([mock.call(['token'])]) mock_instance_set.assert_has_calls( [mock.call('instance', mock.ANY)]) def test_check_token_encoding(self): self.mox.StubOutWithMock(self.manager.mc, "get") self.manager.mc.get(mox.IsA(str)).AndReturn(None) self.mox.ReplayAll() self.manager.check_token(self.context, self.u_token) def test_delete_tokens_for_instance_encoding(self): with test.nested( mock.patch.object(self.manager.mc_instance, 'get', return_value='["token"]'), mock.patch.object(self.manager.mc_instance, 'delete', return_value=True), mock.patch.object(self.manager.mc, 'get'), mock.patch.object(self.manager.mc, 'delete_multi', return_value=True), ) as ( mock_instance_get, mock_instance_delete, mock_get, mock_delete_multi): self.manager.delete_tokens_for_instance(self.context, self.u_instance) mock_instance_get.assert_has_calls([mock.call('instance')]) mock_instance_delete.assert_has_calls([mock.call('instance')]) mock_delete_multi.assert_has_calls([mock.call(['token'])]) class CellsConsoleauthTestCase(ConsoleauthTestCase): """Test Case for consoleauth w/ cells enabled.""" def setUp(self): super(CellsConsoleauthTestCase, self).setUp() self.flags(enable=True, group='cells') def _stub_validate_console_port(self, result): def fake_validate_console_port(ctxt, instance_uuid, console_port, console_type): return result self.stubs.Set(self.manager.cells_rpcapi, 'validate_console_port', fake_validate_console_port) nova-13.1.4/nova/tests/unit/consoleauth/__init__.py0000664000567000056710000000000013064447140023416 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/consoleauth/test_rpcapi.py0000664000567000056710000000661113064447152024215 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
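# NOTE (editorial comment, not in the original source): the helper below pins
# an explicit RPC version and checks how ConsoleAuthAPI negotiates it. A
# minimal sketch of the pattern being verified -- hypothetical variable
# names, the real client lives in nova.consoleauth.rpcapi -- would be:
#
#     if client.can_send_version('2.1'):
#         cctxt = client.prepare(version='2.1')
#         cctxt.call(ctxt, 'authorize_console', access_url=url, **kwargs)
#     else:
#         cctxt = client.prepare(version='2.0')
#         cctxt.call(ctxt, 'authorize_console', **kwargs)  # access_url dropped
#
# which is what test_authorize_console (2.0, access_url dropped) and
# test_authorize_console_access_url (2.1, access_url sent) assert below.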
""" Unit Tests for nova.consoleauth.rpcapi """ import mock from oslo_config import cfg import six from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import context from nova import test CONF = cfg.CONF class ConsoleAuthRpcAPITestCase(test.NoDBTestCase): DROPPED_ARG = object() def _test_consoleauth_api(self, method, **kwargs): do_cast = kwargs.pop('_do_cast', False) ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = consoleauth_rpcapi.ConsoleAuthAPI() self.assertIsNotNone(rpcapi.client) self.assertEqual(rpcapi.client.target.topic, CONF.consoleauth_topic) orig_prepare = rpcapi.client.prepare version = kwargs.pop('version', None) rpc_kwargs = {k: v for k, v in six.iteritems(kwargs) if v is not self.DROPPED_ARG} with test.nested( mock.patch.object(rpcapi.client, 'cast' if do_cast else 'call'), mock.patch.object(rpcapi.client, 'prepare'), mock.patch.object(rpcapi.client, 'can_send_version'), ) as ( rpc_mock, prepare_mock, csv_mock ): prepare_mock.return_value = rpcapi.client rpc_mock.return_value = None if do_cast else 'foo' def fake_csv(v): if version: return orig_prepare( version_cap=version).can_send_version(version=v) else: return orig_prepare().can_send_version() csv_mock.side_effect = fake_csv retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, rpc_mock.return_value) if version: prepare_mock.assert_called_once_with(version=version) else: prepare_mock.assert_called_once_with() rpc_mock.assert_called_once_with(ctxt, method, **rpc_kwargs) def test_authorize_console(self): self._test_consoleauth_api('authorize_console', token='token', console_type='ctype', host='h', port='p', internal_access_path='iap', instance_uuid="instance", access_url=self.DROPPED_ARG, version='2.0') def test_authorize_console_access_url(self): self._test_consoleauth_api('authorize_console', token='token', console_type='ctype', host='h', port='p', internal_access_path='iap', instance_uuid="instance", access_url="fake_access_url", version='2.1') def test_check_token(self): self._test_consoleauth_api('check_token', token='t') def test_delete_tokens_for_instnace(self): self._test_consoleauth_api('delete_tokens_for_instance', _do_cast=True, instance_uuid="instance") nova-13.1.4/nova/tests/unit/fake_loadables/0000775000567000056710000000000013064447471021716 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/fake_loadables/__init__.py0000664000567000056710000000154313064447140024023 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Loadable class handling. """ from nova import loadables class FakeLoadable(object): pass class FakeLoader(loadables.BaseLoader): def __init__(self): super(FakeLoader, self).__init__(FakeLoadable) nova-13.1.4/nova/tests/unit/fake_loadables/fake_loadable2.py0000664000567000056710000000215013064447140025072 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fake Loadable subclasses module #2 """ from nova.tests.unit import fake_loadables class FakeLoadableSubClass5(fake_loadables.FakeLoadable): pass class FakeLoadableSubClass6(fake_loadables.FakeLoadable): pass class _FakeLoadableSubClass7(fake_loadables.FakeLoadable): """Classes beginning with '_' will be ignored.""" pass class FakeLoadableSubClass8(BaseException): """Not a correct subclass.""" def return_valid_class(): return [FakeLoadableSubClass6] nova-13.1.4/nova/tests/unit/fake_loadables/fake_loadable1.py0000664000567000056710000000237013064447140025075 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fake Loadable subclasses module #1 """ from nova.tests.unit import fake_loadables class FakeLoadableSubClass1(fake_loadables.FakeLoadable): pass class FakeLoadableSubClass2(fake_loadables.FakeLoadable): pass class _FakeLoadableSubClass3(fake_loadables.FakeLoadable): """Classes beginning with '_' will be ignored.""" pass class FakeLoadableSubClass4(object): """Not a correct subclass.""" def return_valid_classes(): return [FakeLoadableSubClass1, FakeLoadableSubClass2] def return_invalid_classes(): return [FakeLoadableSubClass1, _FakeLoadableSubClass3, FakeLoadableSubClass4] nova-13.1.4/nova/tests/unit/policy_fixture.py0000664000567000056710000001002313064447152022411 0ustar jenkinsjenkins00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import fixtures from oslo_config import cfg from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils import six from nova import paths import nova.policy from nova.tests.unit import fake_policy CONF = cfg.CONF class RealPolicyFixture(fixtures.Fixture): """Load the live policy for tests. A base policy fixture that starts with the assumption that you'd like to load and enforce the shipped default policy in tests. 
Provides interfaces to tinker with both the contents and location of the policy file before loading to allow overrides. To do this implement ``_prepare_policy`` in the subclass, and adjust the ``policy_file`` accordingly. """ def _prepare_policy(self): """Allow changing of the policy before we get started""" pass def setUp(self): super(RealPolicyFixture, self).setUp() # policy_file can be overridden by subclasses self.policy_file = paths.state_path_def('etc/nova/policy.json') self._prepare_policy() CONF.set_override('policy_file', self.policy_file, group='oslo_policy') nova.policy.reset() nova.policy.init() self.addCleanup(nova.policy.reset) def set_rules(self, rules): policy = nova.policy._ENFORCER policy.set_rules(oslo_policy.Rules.from_dict(rules)) class PolicyFixture(RealPolicyFixture): """Load a fake policy from nova.tests.unit.fake_policy This overrides the policy with a completely fake and synthetic policy file. NOTE(sdague): the use of this is deprecated, and we should unwind the tests so that they can function with the real policy. This is mostly legacy because our default test instances and default test contexts don't match up. It appears that in many cases fake_policy was just modified to whatever makes tests pass, which makes it dangerous to be used in tree. Long term a NullPolicy fixture might be better in those cases. """ def _prepare_policy(self): self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file, 'w') as f: f.write(fake_policy.policy_data) CONF.set_override('policy_dirs', [], group='oslo_policy') class RoleBasedPolicyFixture(RealPolicyFixture): """Load a modified policy which allows all actions only be a single roll. This fixture can be used for testing role based permissions as it provides a version of the policy which stomps over all previous declaration and makes every action only available to a single role. NOTE(sdague): we could probably do this simpler by only loading a single default rule. """ def __init__(self, role="admin", *args, **kwargs): super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs) self.role = role def _prepare_policy(self): policy = jsonutils.load(open(CONF.oslo_policy.policy_file)) # Convert all actions to require specified role for action, rule in six.iteritems(policy): policy[action] = 'role:%s' % self.role self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file, 'w') as f: jsonutils.dump(policy, f) nova-13.1.4/nova/tests/unit/test_nova_manage.py0000664000567000056710000010423113064447152022663 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 Ilya Alekseyev # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
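# NOTE (editorial comment, not in the original source): most of the
# nova-manage command objects exercised below report usage errors by
# returning a small integer rather than raising, and the command-line entry
# point is expected to use that value as the process exit status. A rough
# illustration of what the tests assert:
#
#     cmd = manage.FixedIpCommands()
#     cmd.reserve('192.168.0.100')      # known address: reserved in the DB
#     rc = cmd.reserve('55.55.55.55')   # unknown address: rc == 2, no exception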
from six.moves import StringIO import sys import fixtures import mock from oslo_utils import uuidutils from nova.cmd import manage from nova import context from nova import db from nova.db import migration from nova.db.sqlalchemy import migration as sqla_migration from nova import exception from nova import objects from nova import test from nova.tests.unit.db import fakes as db_fakes from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_network from nova.tests.unit import test_flavors class FixedIpCommandsTestCase(test.TestCase): def setUp(self): super(FixedIpCommandsTestCase, self).setUp() db_fakes.stub_out_db_network_api(self) self.commands = manage.FixedIpCommands() def test_reserve(self): self.commands.reserve('192.168.0.100') address = db.fixed_ip_get_by_address(context.get_admin_context(), '192.168.0.100') self.assertTrue(address['reserved']) def test_reserve_nonexistent_address(self): self.assertEqual(2, self.commands.reserve('55.55.55.55')) def test_unreserve(self): self.commands.unreserve('192.168.0.100') address = db.fixed_ip_get_by_address(context.get_admin_context(), '192.168.0.100') self.assertFalse(address['reserved']) def test_unreserve_nonexistent_address(self): self.assertEqual(2, self.commands.unreserve('55.55.55.55')) def test_list(self): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.list() self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100')) def test_list_just_one_host(self): def fake_fixed_ip_get_by_host(*args, **kwargs): return [db_fakes.fixed_ip_fields] self.useFixture(fixtures.MonkeyPatch( 'nova.db.fixed_ip_get_by_host', fake_fixed_ip_get_by_host)) self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.list('banana') self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100')) class FloatingIpCommandsTestCase(test.NoDBTestCase): def setUp(self): super(FloatingIpCommandsTestCase, self).setUp() db_fakes.stub_out_db_network_api(self) self.commands = manage.FloatingIpCommands() def test_address_to_hosts(self): def assert_loop(result, expected): for ip in result: self.assertIn(str(ip), expected) address_to_hosts = self.commands.address_to_hosts # /32 and /31 self.assertRaises(exception.InvalidInput, address_to_hosts, '192.168.100.1/32') self.assertRaises(exception.InvalidInput, address_to_hosts, '192.168.100.1/31') # /30 expected = ["192.168.100.%s" % i for i in range(1, 3)] result = address_to_hosts('192.168.100.0/30') self.assertEqual(2, len(list(result))) assert_loop(result, expected) # /29 expected = ["192.168.100.%s" % i for i in range(1, 7)] result = address_to_hosts('192.168.100.0/29') self.assertEqual(6, len(list(result))) assert_loop(result, expected) # /28 expected = ["192.168.100.%s" % i for i in range(1, 15)] result = address_to_hosts('192.168.100.0/28') self.assertEqual(14, len(list(result))) assert_loop(result, expected) # /16 result = address_to_hosts('192.168.100.0/16') self.assertEqual(65534, len(list(result))) # NOTE(dripton): I don't test /13 because it makes the test take 3s. # /12 gives over a million IPs, which is ridiculous. 
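        # NOTE (editorial comment, not in the original source): a /n prefix
        # spans 2 ** (32 - n) addresses and address_to_hosts yields all of
        # them except the network and broadcast addresses, hence the counts
        # above: /30 -> 2, /29 -> 6, /28 -> 14 and /16 -> 65534 usable hosts.
        # /31 and /32 leave no usable hosts, and very large ranges such as
        # /12 are likewise rejected with InvalidInput.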
self.assertRaises(exception.InvalidInput, address_to_hosts, '192.168.100.1/12') class NetworkCommandsTestCase(test.NoDBTestCase): def setUp(self): super(NetworkCommandsTestCase, self).setUp() self.commands = manage.NetworkCommands() self.net = {'id': 0, 'label': 'fake', 'injected': False, 'cidr': '192.168.0.0/24', 'cidr_v6': 'dead:beef::/64', 'multi_host': False, 'gateway_v6': 'dead:beef::1', 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '8.8.8.8', 'dns2': '8.8.4.4', 'vlan': 200, 'vlan_start': 201, 'vpn_public_address': '10.0.0.2', 'vpn_public_port': '2222', 'vpn_private_address': '192.168.0.2', 'dhcp_start': '192.168.0.3', 'project_id': 'fake_project', 'host': 'fake_host', 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'} def fake_network_get_by_cidr(context, cidr): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(cidr, self.fake_net['cidr']) return db_fakes.FakeModel(dict(test_network.fake_network, **self.fake_net)) def fake_network_get_by_uuid(context, uuid): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(uuid, self.fake_net['uuid']) return db_fakes.FakeModel(dict(test_network.fake_network, **self.fake_net)) def fake_network_update(context, network_id, values): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(network_id, self.fake_net['id']) self.assertEqual(values, self.fake_update_value) self.fake_network_get_by_cidr = fake_network_get_by_cidr self.fake_network_get_by_uuid = fake_network_get_by_uuid self.fake_network_update = fake_network_update def test_create(self): def fake_create_networks(obj, context, **kwargs): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(kwargs['label'], 'Test') self.assertEqual(kwargs['cidr'], '10.2.0.0/24') self.assertFalse(kwargs['multi_host']) self.assertEqual(kwargs['num_networks'], 1) self.assertEqual(kwargs['network_size'], 256) self.assertEqual(kwargs['vlan'], 200) self.assertEqual(kwargs['vlan_start'], 201) self.assertEqual(kwargs['vpn_start'], 2000) self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120') self.assertEqual(kwargs['gateway'], '10.2.0.1') self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22') self.assertEqual(kwargs['bridge'], 'br200') self.assertEqual(kwargs['bridge_interface'], 'eth0') self.assertEqual(kwargs['dns1'], '8.8.8.8') self.assertEqual(kwargs['dns2'], '8.8.4.4') self.flags(network_manager='nova.network.manager.VlanManager') from nova.network import manager as net_manager self.stubs.Set(net_manager.VlanManager, 'create_networks', fake_create_networks) self.commands.create( label='Test', cidr='10.2.0.0/24', num_networks=1, network_size=256, multi_host='F', vlan=200, vlan_start=201, vpn_start=2000, cidr_v6='fd00:2::/120', gateway='10.2.0.1', gateway_v6='fd00:2::22', bridge='br200', bridge_interface='eth0', dns1='8.8.8.8', dns2='8.8.4.4', uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa') def test_list(self): def fake_network_get_all(context): return [db_fakes.FakeModel(self.net)] self.stub_out('nova.db.network_get_all', fake_network_get_all) output = StringIO() sys.stdout = output self.commands.list() sys.stdout = sys.__stdout__ result = output.getvalue() _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s", "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s", "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"]) head = _fmt % {'id': 'id', 'cidr': 'IPv4', 'cidr_v6': 'IPv6', 'dhcp_start': 'start address', 'dns1': 'DNS1', 'dns2': 'DNS2', 'vlan': 'VlanID', 
'project_id': 'project', 'uuid': "uuid"} body = _fmt % {'id': self.net['id'], 'cidr': self.net['cidr'], 'cidr_v6': self.net['cidr_v6'], 'dhcp_start': self.net['dhcp_start'], 'dns1': self.net['dns1'], 'dns2': self.net['dns2'], 'vlan': self.net['vlan'], 'project_id': self.net['project_id'], 'uuid': self.net['uuid']} answer = '%s\n%s\n' % (head, body) self.assertEqual(result, answer) def test_delete(self): self.fake_net = self.net self.fake_net['project_id'] = None self.fake_net['host'] = None self.stub_out('nova.db.network_get_by_uuid', self.fake_network_get_by_uuid) def fake_network_delete_safe(context, network_id): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(network_id, self.fake_net['id']) self.stub_out('nova.db.network_delete_safe', fake_network_delete_safe) self.commands.delete(uuid=self.fake_net['uuid']) def test_delete_by_cidr(self): self.fake_net = self.net self.fake_net['project_id'] = None self.fake_net['host'] = None self.stub_out('nova.db.network_get_by_cidr', self.fake_network_get_by_cidr) def fake_network_delete_safe(context, network_id): self.assertTrue(context.to_dict()['is_admin']) self.assertEqual(network_id, self.fake_net['id']) self.stub_out('nova.db.network_delete_safe', fake_network_delete_safe) self.commands.delete(fixed_range=self.fake_net['cidr']) def _test_modify_base(self, update_value, project, host, dis_project=None, dis_host=None): self.fake_net = self.net self.fake_update_value = update_value self.stub_out('nova.db.network_get_by_cidr', self.fake_network_get_by_cidr) self.stub_out('nova.db.network_update', self.fake_network_update) self.commands.modify(self.fake_net['cidr'], project=project, host=host, dis_project=dis_project, dis_host=dis_host) def test_modify_associate(self): self._test_modify_base(update_value={'project_id': 'test_project', 'host': 'test_host'}, project='test_project', host='test_host') def test_modify_unchanged(self): self._test_modify_base(update_value={}, project=None, host=None) def test_modify_disassociate(self): self._test_modify_base(update_value={'project_id': None, 'host': None}, project=None, host=None, dis_project=True, dis_host=True) class NeutronV2NetworkCommandsTestCase(test.NoDBTestCase): def setUp(self): super(NeutronV2NetworkCommandsTestCase, self).setUp() self.flags(use_neutron=True) self.commands = manage.NetworkCommands() def test_create(self): self.assertEqual(2, self.commands.create()) def test_list(self): self.assertEqual(2, self.commands.list()) def test_delete(self): self.assertEqual(2, self.commands.delete()) def test_modify(self): self.assertEqual(2, self.commands.modify('192.168.0.1')) class ProjectCommandsTestCase(test.TestCase): def setUp(self): super(ProjectCommandsTestCase, self).setUp() self.commands = manage.ProjectCommands() def test_quota(self): output = StringIO() sys.stdout = output self.commands.quota(project_id='admin', key='instances', value='unlimited', ) sys.stdout = sys.__stdout__ result = output.getvalue() print_format = "%-36s %-10s" % ('instances', 'unlimited') self.assertIn(print_format, result) def test_quota_update_invalid_key(self): self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10')) class VmCommandsTestCase(test.NoDBTestCase): def setUp(self): super(VmCommandsTestCase, self).setUp() self.commands = manage.VmCommands() self.fake_flavor = objects.Flavor(**test_flavors.DEFAULT_FLAVORS[0]) def test_list_without_host(self): output = StringIO() sys.stdout = output with mock.patch.object(objects.InstanceList, 'get_by_filters') as get: get.return_value = 
objects.InstanceList( objects=[fake_instance.fake_instance_obj( context.get_admin_context(), host='foo-host', flavor=self.fake_flavor, system_metadata={})]) self.commands.list() sys.stdout = sys.__stdout__ result = output.getvalue() self.assertIn('node', result) # check the header line self.assertIn('m1.tiny', result) # flavor.name self.assertIn('foo-host', result) def test_list_with_host(self): output = StringIO() sys.stdout = output with mock.patch.object(objects.InstanceList, 'get_by_host') as get: get.return_value = objects.InstanceList( objects=[fake_instance.fake_instance_obj( context.get_admin_context(), flavor=self.fake_flavor, system_metadata={})]) self.commands.list(host='fake-host') sys.stdout = sys.__stdout__ result = output.getvalue() self.assertIn('node', result) # check the header line self.assertIn('m1.tiny', result) # flavor.name self.assertIn('fake-host', result) class DBCommandsTestCase(test.NoDBTestCase): def setUp(self): super(DBCommandsTestCase, self).setUp() self.commands = manage.DbCommands() def test_archive_deleted_rows_negative(self): self.assertEqual(1, self.commands.archive_deleted_rows(-1)) def test_archive_deleted_rows_large_number(self): large_number = '1' * 100 self.assertEqual(1, self.commands.archive_deleted_rows(large_number)) @mock.patch.object(db, 'archive_deleted_rows', return_value=dict(instances=10, consoles=5)) def _test_archive_deleted_rows(self, mock_db_archive, verbose=False): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.archive_deleted_rows(20, verbose=verbose) mock_db_archive.assert_called_once_with(20) output = sys.stdout.getvalue() if verbose: expected = '''\ +-----------+-------------------------+ | Table | Number of Rows Archived | +-----------+-------------------------+ | consoles | 5 | | instances | 10 | +-----------+-------------------------+ ''' self.assertEqual(expected, output) else: self.assertEqual(0, len(output)) def test_archive_deleted_rows(self): # Tests that we don't show any table output (not verbose). self._test_archive_deleted_rows() def test_archive_deleted_rows_verbose(self): # Tests that we get table output. 
self._test_archive_deleted_rows(verbose=True) @mock.patch.object(db, 'archive_deleted_rows', return_value={}) def test_archive_deleted_rows_verbose_no_results(self, mock_db_archive): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.archive_deleted_rows(20, verbose=True) mock_db_archive.assert_called_once_with(20) output = sys.stdout.getvalue() self.assertIn('Nothing was archived.', output) @mock.patch.object(migration, 'db_null_instance_uuid_scan', return_value={'foo': 0}) def test_null_instance_uuid_scan_no_records_found(self, mock_scan): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.null_instance_uuid_scan() self.assertIn("There were no records found", sys.stdout.getvalue()) @mock.patch.object(migration, 'db_null_instance_uuid_scan', return_value={'foo': 1, 'bar': 0}) def _test_null_instance_uuid_scan(self, mock_scan, delete): self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands.null_instance_uuid_scan(delete) output = sys.stdout.getvalue() if delete: self.assertIn("Deleted 1 records from table 'foo'.", output) self.assertNotIn("Deleted 0 records from table 'bar'.", output) else: self.assertIn("1 records in the 'foo' table", output) self.assertNotIn("0 records in the 'bar' table", output) self.assertNotIn("There were no records found", output) def test_null_instance_uuid_scan_readonly(self): self._test_null_instance_uuid_scan(delete=False) def test_null_instance_uuid_scan_delete(self): self._test_null_instance_uuid_scan(delete=True) @mock.patch.object(sqla_migration, 'db_version', return_value=2) def test_version(self, sqla_migrate): self.commands.version() sqla_migrate.assert_called_once_with(database='main') @mock.patch.object(sqla_migration, 'db_sync') def test_sync(self, sqla_sync): self.commands.sync(version=4) sqla_sync.assert_called_once_with(version=4, database='main') def _fake_db_command(self, migrations=None): if migrations is None: mock_mig_1 = mock.MagicMock(__name__="mock_mig_1") mock_mig_2 = mock.MagicMock(__name__="mock_mig_2") mock_mig_1.return_value = (5, 4) mock_mig_2.return_value = (6, 6) migrations = (mock_mig_1, mock_mig_2) class _CommandSub(manage.DbCommands): online_migrations = migrations return _CommandSub @mock.patch('nova.context.get_admin_context') def test_online_migrations(self, mock_get_context): ctxt = mock_get_context.return_value command_cls = self._fake_db_command() command = command_cls() command.online_data_migrations(10) command_cls.online_migrations[0].assert_called_once_with(ctxt, 10) command_cls.online_migrations[1].assert_called_once_with(ctxt, 6) @mock.patch('nova.context.get_admin_context') def test_online_migrations_no_max_count(self, mock_get_context): total = [120] batches = [50, 40, 30, 0] runs = [] def fake_migration(context, count): self.assertEqual(mock_get_context.return_value, context) runs.append(count) count = batches.pop(0) total[0] -= count return total[0], count command_cls = self._fake_db_command((fake_migration,)) command = command_cls() command.online_data_migrations(None) self.assertEqual([], batches) self.assertEqual(0, total[0]) self.assertEqual([50, 50, 50, 50], runs) def test_online_migrations_error(self): fake_migration = mock.MagicMock() fake_migration.side_effect = Exception command_cls = self._fake_db_command((fake_migration,)) command = command_cls() command.online_data_migrations(None) class ApiDbCommandsTestCase(test.NoDBTestCase): def setUp(self): super(ApiDbCommandsTestCase, self).setUp() self.commands = manage.ApiDbCommands() 
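    # NOTE (editorial comment, not in the original source): ApiDbCommands
    # mirrors the DbCommands tests above but drives the separate 'api'
    # database, so the two tests below expect sqla_migration to be invoked
    # with database='api' rather than database='main'.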
@mock.patch.object(sqla_migration, 'db_version', return_value=2) def test_version(self, sqla_migrate): self.commands.version() sqla_migrate.assert_called_once_with(database='api') @mock.patch.object(sqla_migration, 'db_sync') def test_sync(self, sqla_sync): self.commands.sync(version=4) sqla_sync.assert_called_once_with(version=4, database='api') class ServiceCommandsTestCase(test.TestCase): def setUp(self): super(ServiceCommandsTestCase, self).setUp() self.commands = manage.ServiceCommands() def test_service_enable_invalid_params(self): self.assertEqual(2, self.commands.enable('nohost', 'noservice')) def test_service_disable_invalid_params(self): self.assertEqual(2, self.commands.disable('nohost', 'noservice')) class CellCommandsTestCase(test.NoDBTestCase): def setUp(self): super(CellCommandsTestCase, self).setUp() self.commands = manage.CellCommands() def test_create_transport_hosts_multiple(self): """Test the _create_transport_hosts method when broker_hosts is set. """ brokers = "127.0.0.1:5672,127.0.0.2:5671" thosts = self.commands._create_transport_hosts( 'guest', 'devstack', broker_hosts=brokers) self.assertEqual(2, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(5672, thosts[0].port) self.assertEqual('127.0.0.2', thosts[1].hostname) self.assertEqual(5671, thosts[1].port) def test_create_transport_hosts_single(self): """Test the _create_transport_hosts method when hostname is passed.""" thosts = self.commands._create_transport_hosts('guest', 'devstack', hostname='127.0.0.1', port=80) self.assertEqual(1, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(80, thosts[0].port) def test_create_transport_hosts_single_broker(self): """Test the _create_transport_hosts method for single broker_hosts.""" thosts = self.commands._create_transport_hosts( 'guest', 'devstack', broker_hosts='127.0.0.1:5672') self.assertEqual(1, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(5672, thosts[0].port) def test_create_transport_hosts_both(self): """Test the _create_transport_hosts method when both broker_hosts and hostname/port are passed. 
""" thosts = self.commands._create_transport_hosts( 'guest', 'devstack', broker_hosts='127.0.0.1:5672', hostname='127.0.0.2', port=80) self.assertEqual(1, len(thosts)) self.assertEqual('127.0.0.1', thosts[0].hostname) self.assertEqual(5672, thosts[0].port) def test_create_transport_hosts_wrong_val(self): """Test the _create_transport_hosts method when broker_hosts is wrongly sepcified """ self.assertRaises(ValueError, self.commands._create_transport_hosts, 'guest', 'devstack', broker_hosts='127.0.0.1:5672,127.0.0.1') def test_create_transport_hosts_wrong_port_val(self): """Test the _create_transport_hosts method when port in broker_hosts is wrongly sepcified """ self.assertRaises(ValueError, self.commands._create_transport_hosts, 'guest', 'devstack', broker_hosts='127.0.0.1:') def test_create_transport_hosts_wrong_port_arg(self): """Test the _create_transport_hosts method when port argument is wrongly sepcified """ self.assertRaises(ValueError, self.commands._create_transport_hosts, 'guest', 'devstack', hostname='127.0.0.1', port='ab') @mock.patch.object(context, 'get_admin_context') @mock.patch.object(db, 'cell_create') def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt): """Test the create function when broker_hosts is passed """ cell_tp_url = "fake://guest:devstack@127.0.0.1:5432" cell_tp_url += ",guest:devstack@127.0.0.2:9999/" ctxt = mock.sentinel mock_ctxt.return_value = mock.sentinel self.commands.create("test", broker_hosts='127.0.0.1:5432,127.0.0.2:9999', woffset=0, wscale=0, username="guest", password="devstack") exp_values = {'name': "test", 'is_parent': False, 'transport_url': cell_tp_url, 'weight_offset': 0.0, 'weight_scale': 0.0} mock_db_cell_create.assert_called_once_with(ctxt, exp_values) @mock.patch.object(context, 'get_admin_context') @mock.patch.object(db, 'cell_create') def test_create_broker_hosts_with_url_decoding_fix(self, mock_db_cell_create, mock_ctxt): """Test the create function when broker_hosts is passed """ cell_tp_url = "fake://the=user:the=password@127.0.0.1:5432/" ctxt = mock.sentinel mock_ctxt.return_value = mock.sentinel self.commands.create("test", broker_hosts='127.0.0.1:5432', woffset=0, wscale=0, username="the=user", password="the=password") exp_values = {'name': "test", 'is_parent': False, 'transport_url': cell_tp_url, 'weight_offset': 0.0, 'weight_scale': 0.0} mock_db_cell_create.assert_called_once_with(ctxt, exp_values) @mock.patch.object(context, 'get_admin_context') @mock.patch.object(db, 'cell_create') def test_create_hostname(self, mock_db_cell_create, mock_ctxt): """Test the create function when hostname and port is passed """ cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/" ctxt = mock.sentinel mock_ctxt.return_value = mock.sentinel self.commands.create("test", hostname='127.0.0.1', port="9999", woffset=0, wscale=0, username="guest", password="devstack") exp_values = {'name': "test", 'is_parent': False, 'transport_url': cell_tp_url, 'weight_offset': 0.0, 'weight_scale': 0.0} mock_db_cell_create.assert_called_once_with(ctxt, exp_values) class CellV2CommandsTestCase(test.TestCase): def setUp(self): super(CellV2CommandsTestCase, self).setUp() self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) self.commands = manage.CellV2Commands() def test_map_cell_and_hosts(self): # Create some fake compute nodes and check if they get host mappings ctxt = context.RequestContext() values = { 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 1024, 'vcpus_used': 2, 'memory_mb_used': 2048, 'local_gb_used': 512, 'hypervisor_type': 
'Hyper-Dan-VM-ware', 'hypervisor_version': 1001, 'cpu_info': 'Schmintel i786', } for i in range(3): host = 'host%s' % i compute_node = objects.ComputeNode(ctxt, host=host, **values) compute_node.create() cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) cell_mapping_uuid = sys.stdout.getvalue().strip() # Verify the cell mapping cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_mapping_uuid) self.assertEqual('ssd', cell_mapping.name) self.assertEqual(cell_transport_url, cell_mapping.transport_url) # Verify the host mappings for i in range(3): host = 'host%s' % i host_mapping = objects.HostMapping.get_by_host(ctxt, host) self.assertEqual(cell_mapping.uuid, host_mapping.cell_mapping.uuid) def test_map_cell_and_hosts_duplicate(self): # Create a cell mapping and hosts and check that nothing new is created ctxt = context.RequestContext() cell_mapping_uuid = uuidutils.generate_uuid() cell_mapping = objects.CellMapping( ctxt, uuid=cell_mapping_uuid, name='fake', transport_url='fake://', database_connection='fake://') cell_mapping.create() # Create compute nodes that will map to the cell values = { 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 1024, 'vcpus_used': 2, 'memory_mb_used': 2048, 'local_gb_used': 512, 'hypervisor_type': 'Hyper-Dan-VM-ware', 'hypervisor_version': 1001, 'cpu_info': 'Schmintel i786', } for i in range(3): host = 'host%s' % i compute_node = objects.ComputeNode(ctxt, host=host, **values) compute_node.create() host_mapping = objects.HostMapping( ctxt, host=host, cell_mapping=cell_mapping) host_mapping.create() cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" retval = self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) self.assertEqual(0, retval) output = sys.stdout.getvalue().strip() expected = '' for i in range(3): expected += ('Host host%s is already mapped to cell %s\n' % (i, cell_mapping_uuid)) expected += 'All hosts are already mapped to cell(s), exiting.' 
self.assertEqual(expected, output) def test_map_cell_and_hosts_partial_update(self): # Create a cell mapping and partial hosts and check that # missing HostMappings are created ctxt = context.RequestContext() cell_mapping_uuid = uuidutils.generate_uuid() cell_mapping = objects.CellMapping( ctxt, uuid=cell_mapping_uuid, name='fake', transport_url='fake://', database_connection='fake://') cell_mapping.create() # Create compute nodes that will map to the cell values = { 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 1024, 'vcpus_used': 2, 'memory_mb_used': 2048, 'local_gb_used': 512, 'hypervisor_type': 'Hyper-Dan-VM-ware', 'hypervisor_version': 1001, 'cpu_info': 'Schmintel i786', } for i in range(3): host = 'host%s' % i compute_node = objects.ComputeNode(ctxt, host=host, **values) compute_node.create() # Only create 2 existing HostMappings out of 3 for i in range(2): host = 'host%s' % i host_mapping = objects.HostMapping( ctxt, host=host, cell_mapping=cell_mapping) host_mapping.create() cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) # Verify the HostMapping for the last host was created host_mapping = objects.HostMapping.get_by_host(ctxt, 'host2') self.assertEqual(cell_mapping.uuid, host_mapping.cell_mapping.uuid) # Verify the output output = sys.stdout.getvalue().strip() expected = '' for i in range(2): expected += ('Host host%s is already mapped to cell %s\n' % (i, cell_mapping_uuid)) # The expected CellMapping UUID for the last host should be the same expected += cell_mapping.uuid self.assertEqual(expected, output) def test_map_cell_and_hosts_no_hosts_found(self): cell_transport_url = "fake://guest:devstack@127.0.0.1:9999/" retval = self.commands.map_cell_and_hosts(cell_transport_url, name='ssd', verbose=True) self.assertEqual(0, retval) output = sys.stdout.getvalue().strip() expected = 'No hosts found to map to cell, exiting.' self.assertEqual(expected, output) nova-13.1.4/nova/tests/unit/README.rst0000664000567000056710000000535613064447140020473 0ustar jenkinsjenkins00000000000000===================================== OpenStack Nova Testing Infrastructure ===================================== This README file attempts to provide current and prospective contributors with everything they need to know in order to start creating unit tests for nova. Note: the content for the rest of this file will be added as the work items in the following blueprint are completed: https://blueprints.launchpad.net/nova/+spec/consolidate-testing-infrastructure Test Types: Unit vs. Functional vs. Integration ----------------------------------------------- TBD Writing Unit Tests ------------------ TBD Using Fakes ~~~~~~~~~~~ TBD test.TestCase ------------- The TestCase class from nova.test (generally imported as test) will automatically manage self.stubs using the stubout module and self.mox using the mox module during the setUp step. They will automatically verify and clean up during the tearDown step. If using test.TestCase, calling the super class setUp is required and calling the super class tearDown is required to be last if tearDown is overridden. Writing Functional Tests ------------------------ TBD Writing Integration Tests ------------------------- TBD Tests and Exceptions -------------------- A properly written test asserts that particular behavior occurs. This can be a success condition or a failure condition, including an exception. 
When asserting that a particular exception is raised, the most specific exception possible should be used. In particular, testing for Exception being raised is almost always a mistake since it will match (almost) every exception, even those unrelated to the exception intended to be tested. This applies to catching exceptions manually with a try/except block, or using assertRaises(). Example:: self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, elevated, instance_uuid) If a stubbed function/method needs a generic exception for testing purposes, test.TestingException is available. Example:: def stubbed_method(self): raise test.TestingException() self.stubs.Set(cls, 'inner_method', stubbed_method) obj = cls() self.assertRaises(test.TestingException, obj.outer_method) Stubbing and Mocking -------------------- Whenever possible, tests SHOULD NOT stub and mock out the same function. If it's unavoidable, tests SHOULD define stubs before mocks since the `TestCase` cleanup routine will un-mock before un-stubbing. Doing otherwise results in a test that leaks stubbed functions, causing hard-to-debug interference between tests [1]_. If a mock must take place before a stub, any stubs after the mock call MUST be manually unset using `self.cleanUp` calls within the test. .. [1] https://bugs.launchpad.net/nova/+bug/1180671 nova-13.1.4/nova/tests/unit/test_availability_zones.py0000664000567000056710000002730513064447140024303 0ustar jenkinsjenkins00000000000000# Copyright 2013 Netease Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
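# The testing README above recommends defining stubs before mocks, because
# the TestCase cleanup routine un-mocks before it un-stubs. A minimal,
# self-contained sketch of that ordering; _Worker is a hypothetical class
# used only for illustration and is not part of the nova tree.
import mock

from nova import test


class StubBeforeMockExampleTest(test.NoDBTestCase):
    def test_stub_then_mock(self):
        class _Worker(object):
            def inner(self):
                return 'real inner'

            def log(self):
                return 'real log'

            def outer(self):
                self.log()
                return self.inner()

        def fake_inner(_self):
            raise test.TestingException()

        # Stub first, so the cleanup order (un-mock, then un-stub) does not
        # leak the stubbed function into other tests ...
        self.stubs.Set(_Worker, 'inner', fake_inner)
        # ... then mock.
        with mock.patch.object(_Worker, 'log') as mock_log:
            self.assertRaises(test.TestingException, _Worker().outer)
            self.assertTrue(mock_log.called)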
""" Tests for availability zones """ import mock import six from nova import availability_zones as az import nova.conf from nova import context from nova import db from nova import objects from nova import test CONF = nova.conf.CONF class AvailabilityZoneTestCases(test.TestCase): """Test case for aggregate based availability zone.""" def setUp(self): super(AvailabilityZoneTestCases, self).setUp() self.host = 'me' self.availability_zone = 'nova-test' self.default_az = CONF.default_availability_zone self.default_in_az = CONF.internal_service_availability_zone self.context = context.get_admin_context() self.agg = self._create_az('az_agg', self.availability_zone) def tearDown(self): db.aggregate_delete(self.context, self.agg['id']) super(AvailabilityZoneTestCases, self).tearDown() def _create_az(self, agg_name, az_name): agg_meta = {'name': agg_name} agg = db.aggregate_create(self.context, agg_meta) metadata = {'availability_zone': az_name} db.aggregate_metadata_add(self.context, agg['id'], metadata) return agg def _update_az(self, aggregate, az_name): metadata = {'availability_zone': az_name} db.aggregate_update(self.context, aggregate['id'], metadata) def _create_service_with_topic(self, topic, host, disabled=False): values = { 'binary': 'bin', 'host': host, 'topic': topic, 'disabled': disabled, } return db.service_create(self.context, values) def _destroy_service(self, service): return db.service_destroy(self.context, service['id']) def _add_to_aggregate(self, service, aggregate): return db.aggregate_host_add(self.context, aggregate['id'], service['host']) def _delete_from_aggregate(self, service, aggregate): return db.aggregate_host_delete(self.context, aggregate['id'], service['host']) def test_rest_availability_zone_reset_cache(self): az._get_cache().add('cache', 'fake_value') az.reset_cache() self.assertIsNone(az._get_cache().get('cache')) def test_update_host_availability_zone_cache(self): """Test availability zone cache could be update.""" service = self._create_service_with_topic('compute', self.host) # Create a new aggregate with an AZ and add the host to the AZ az_name = 'az1' cache_key = az._make_cache_key(self.host) agg_az1 = self._create_az('agg-az1', az_name) self._add_to_aggregate(service, agg_az1) az.update_host_availability_zone_cache(self.context, self.host) self.assertEqual('az1', az._get_cache().get(cache_key)) az.update_host_availability_zone_cache(self.context, self.host, 'az2') self.assertEqual('az2', az._get_cache().get(cache_key)) def test_set_availability_zone_compute_service(self): """Test for compute service get right availability zone.""" service = self._create_service_with_topic('compute', self.host) services = db.service_get_all(self.context) # The service is not add into aggregate, so confirm it is default # availability zone. new_service = az.set_availability_zones(self.context, services)[0] self.assertEqual(self.default_az, new_service['availability_zone']) # The service is added into aggregate, confirm return the aggregate # availability zone. 
self._add_to_aggregate(service, self.agg) new_service = az.set_availability_zones(self.context, services)[0] self.assertEqual(self.availability_zone, new_service['availability_zone']) self._destroy_service(service) def test_set_availability_zone_unicode_key(self): """Test set availability zone cache key is unicode.""" service = self._create_service_with_topic('network', self.host) services = db.service_get_all(self.context) az.set_availability_zones(self.context, services) self.assertIsInstance(services[0]['host'], six.text_type) cached_key = az._make_cache_key(services[0]['host']) self.assertIsInstance(cached_key, str) self._destroy_service(service) def test_set_availability_zone_not_compute_service(self): """Test not compute service get right availability zone.""" service = self._create_service_with_topic('network', self.host) services = db.service_get_all(self.context) new_service = az.set_availability_zones(self.context, services)[0] self.assertEqual(self.default_in_az, new_service['availability_zone']) self._destroy_service(service) def test_get_host_availability_zone(self): """Test get right availability zone by given host.""" self.assertEqual(self.default_az, az.get_host_availability_zone(self.context, self.host)) service = self._create_service_with_topic('compute', self.host) self._add_to_aggregate(service, self.agg) self.assertEqual(self.availability_zone, az.get_host_availability_zone(self.context, self.host)) def test_update_host_availability_zone(self): """Test availability zone could be update by given host.""" service = self._create_service_with_topic('compute', self.host) # Create a new aggregate with an AZ and add the host to the AZ az_name = 'az1' agg_az1 = self._create_az('agg-az1', az_name) self._add_to_aggregate(service, agg_az1) self.assertEqual(az_name, az.get_host_availability_zone(self.context, self.host)) # Update AZ new_az_name = 'az2' self._update_az(agg_az1, new_az_name) self.assertEqual(new_az_name, az.get_host_availability_zone(self.context, self.host)) def test_delete_host_availability_zone(self): """Test availability zone could be deleted successfully.""" service = self._create_service_with_topic('compute', self.host) # Create a new aggregate with an AZ and add the host to the AZ az_name = 'az1' agg_az1 = self._create_az('agg-az1', az_name) self._add_to_aggregate(service, agg_az1) self.assertEqual(az_name, az.get_host_availability_zone(self.context, self.host)) # Delete the AZ via deleting the aggregate self._delete_from_aggregate(service, agg_az1) self.assertEqual(self.default_az, az.get_host_availability_zone(self.context, self.host)) def test_get_availability_zones(self): """Test get_availability_zones.""" # When the param get_only_available of get_availability_zones is set # to default False, it returns two lists, zones with at least one # enabled services, and zones with no enabled services, # when get_only_available is set to True, only return a list of zones # with at least one enabled services. 
# Use the following test data: # # zone host enabled # nova-test host1 Yes # nova-test host2 No # nova-test2 host3 Yes # nova-test3 host4 No # host5 No agg2 = self._create_az('agg-az2', 'nova-test2') agg3 = self._create_az('agg-az3', 'nova-test3') service1 = self._create_service_with_topic('compute', 'host1', disabled=False) service2 = self._create_service_with_topic('compute', 'host2', disabled=True) service3 = self._create_service_with_topic('compute', 'host3', disabled=False) service4 = self._create_service_with_topic('compute', 'host4', disabled=True) self._create_service_with_topic('compute', 'host5', disabled=True) self._add_to_aggregate(service1, self.agg) self._add_to_aggregate(service2, self.agg) self._add_to_aggregate(service3, agg2) self._add_to_aggregate(service4, agg3) zones, not_zones = az.get_availability_zones(self.context) self.assertEqual(['nova-test', 'nova-test2'], zones) self.assertEqual(['nova-test3', 'nova'], not_zones) zones = az.get_availability_zones(self.context, True) self.assertEqual(['nova-test', 'nova-test2'], zones) zones, not_zones = az.get_availability_zones(self.context, with_hosts=True) self.assertJsonEqual(zones, [(u'nova-test2', set([u'host3'])), (u'nova-test', set([u'host1']))]) self.assertJsonEqual(not_zones, [(u'nova-test3', set([u'host4'])), (u'nova', set([u'host5']))]) def test_get_instance_availability_zone_default_value(self): """Test get right availability zone by given an instance.""" fake_inst = objects.Instance(host=self.host, availability_zone=None) self.assertEqual(self.default_az, az.get_instance_availability_zone(self.context, fake_inst)) def test_get_instance_availability_zone_from_aggregate(self): """Test get availability zone from aggregate by given an instance.""" host = 'host170' service = self._create_service_with_topic('compute', host) self._add_to_aggregate(service, self.agg) fake_inst = objects.Instance(host=host, availability_zone=self.availability_zone) self.assertEqual(self.availability_zone, az.get_instance_availability_zone(self.context, fake_inst)) @mock.patch.object(az._get_cache(), 'get') def test_get_instance_availability_zone_cache_differs(self, cache_get): host = 'host170' service = self._create_service_with_topic('compute', host) self._add_to_aggregate(service, self.agg) cache_get.return_value = self.default_az fake_inst = objects.Instance(host=host, availability_zone=self.availability_zone) self.assertEqual( self.availability_zone, az.get_instance_availability_zone(self.context, fake_inst)) def test_get_instance_availability_zone_no_host(self): """Test get availability zone from instance if host not set.""" fake_inst = objects.Instance(host=None, availability_zone='inst-az') result = az.get_instance_availability_zone(self.context, fake_inst) self.assertEqual('inst-az', result) def test_get_instance_availability_zone_no_host_no_az(self): """Test get availability zone if neither host nor az is set.""" fake_inst = objects.Instance(host=None, availability_zone=None) result = az.get_instance_availability_zone(self.context, fake_inst) self.assertIsNone(result) nova-13.1.4/nova/tests/unit/compute/0000775000567000056710000000000013064447471020456 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/compute/test_stats.py0000664000567000056710000002232313064447140023220 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for compute node stats.""" from nova.compute import stats from nova.compute import task_states from nova.compute import vm_states from nova import test from nova.tests.unit import fake_instance from nova.tests import uuidsentinel as uuids class StatsTestCase(test.NoDBTestCase): def setUp(self): super(StatsTestCase, self).setUp() self.stats = stats.Stats() def _fake_object(self, updates): return fake_instance.fake_instance_obj(None, **updates) def _create_instance(self, values=None): instance = { "os_type": "Linux", "project_id": "1234", "task_state": None, "vm_state": vm_states.BUILDING, "vcpus": 1, "uuid": uuids.stats_linux_instance_1, } if values: instance.update(values) return self._fake_object(instance) def test_os_type_count(self): os_type = "Linux" self.assertEqual(0, self.stats.num_os_type(os_type)) self.stats._increment("num_os_type_" + os_type) self.stats._increment("num_os_type_" + os_type) self.stats._increment("num_os_type_Vax") self.assertEqual(2, self.stats.num_os_type(os_type)) self.stats["num_os_type_" + os_type] -= 1 self.assertEqual(1, self.stats.num_os_type(os_type)) def test_update_project_count(self): proj_id = "1234" def _get(): return self.stats.num_instances_for_project(proj_id) self.assertEqual(0, _get()) self.stats._increment("num_proj_" + proj_id) self.assertEqual(1, _get()) self.stats["num_proj_" + proj_id] -= 1 self.assertEqual(0, _get()) def test_instance_count(self): self.assertEqual(0, self.stats.num_instances) for i in range(5): self.stats._increment("num_instances") self.stats["num_instances"] -= 1 self.assertEqual(4, self.stats.num_instances) def test_add_stats_for_instance(self): instance = { "os_type": "Linux", "project_id": "1234", "task_state": None, "vm_state": vm_states.BUILDING, "vcpus": 3, "uuid": uuids.stats_linux_instance_1, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "FreeBSD", "project_id": "1234", "task_state": task_states.SCHEDULING, "vm_state": None, "vcpus": 1, "uuid": uuids.stats_freebsd_instance, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "Linux", "project_id": "2345", "task_state": task_states.SCHEDULING, "vm_state": vm_states.BUILDING, "vcpus": 2, "uuid": uuids.stats_linux_instance_2, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "Linux", "project_id": "2345", "task_state": task_states.RESCUING, "vm_state": vm_states.ACTIVE, "vcpus": 2, "uuid": uuids.stats_linux_instance_3, } self.stats.update_stats_for_instance(self._fake_object(instance)) instance = { "os_type": "Linux", "project_id": "2345", "task_state": task_states.UNSHELVING, "vm_state": vm_states.ACTIVE, "vcpus": 2, "uuid": uuids.stats_linux_instance_4, } self.stats.update_stats_for_instance(self._fake_object(instance)) self.assertEqual(4, self.stats.num_os_type("Linux")) self.assertEqual(1, self.stats.num_os_type("FreeBSD")) self.assertEqual(2, self.stats.num_instances_for_project("1234")) self.assertEqual(3, self.stats.num_instances_for_project("2345")) self.assertEqual(1, self.stats["num_task_None"]) self.assertEqual(2, 
self.stats["num_task_" + task_states.SCHEDULING]) self.assertEqual(1, self.stats["num_task_" + task_states.UNSHELVING]) self.assertEqual(1, self.stats["num_task_" + task_states.RESCUING]) self.assertEqual(1, self.stats["num_vm_None"]) self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING]) def test_calculate_workload(self): self.stats._increment("num_task_None") self.stats._increment("num_task_" + task_states.SCHEDULING) self.stats._increment("num_task_" + task_states.SCHEDULING) self.assertEqual(2, self.stats.calculate_workload()) def test_update_stats_for_instance_no_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.stats.update_stats_for_instance(instance) # no change self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project("1234")) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(1, self.stats["num_task_None"]) self.assertEqual(1, self.stats["num_vm_" + vm_states.BUILDING]) def test_update_stats_for_instance_vm_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) instance["vm_state"] = vm_states.PAUSED self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project(1234)) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(0, self.stats["num_vm_%s" % vm_states.BUILDING]) self.assertEqual(1, self.stats["num_vm_%s" % vm_states.PAUSED]) def test_update_stats_for_instance_task_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) instance["task_state"] = task_states.REBUILDING self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project("1234")) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(0, self.stats["num_task_None"]) self.assertEqual(1, self.stats["num_task_%s" % task_states.REBUILDING]) def test_update_stats_for_instance_deleted(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances_for_project("1234")) instance["vm_state"] = vm_states.DELETED self.stats.update_stats_for_instance(instance) self.assertEqual(0, self.stats.num_instances) self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) def test_update_stats_for_instance_offloaded(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances_for_project("1234")) instance["vm_state"] = vm_states.SHELVED_OFFLOADED self.stats.update_stats_for_instance(instance) self.assertEqual(0, self.stats.num_instances) self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) def test_io_workload(self): vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED] tasks = [task_states.RESIZE_MIGRATING, task_states.REBUILDING, task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT, task_states.IMAGE_BACKUP, task_states.RESCUING, task_states.UNSHELVING, task_states.SHELVING] for state in vms: self.stats._increment("num_vm_" + state) for state in tasks: self.stats._increment("num_task_" + state) self.assertEqual(8, 
self.stats.io_workload) def test_io_workload_saved_to_stats(self): values = {'task_state': task_states.RESIZE_MIGRATING} instance = self._create_instance(values) self.stats.update_stats_for_instance(instance) self.assertEqual(2, self.stats["io_workload"]) def test_clear(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.assertNotEqual(0, len(self.stats)) self.assertEqual(1, len(self.stats.states)) self.stats.clear() self.assertEqual(0, len(self.stats)) self.assertEqual(0, len(self.stats.states)) nova-13.1.4/nova/tests/unit/compute/test_arch.py0000664000567000056710000000403513064447152023002 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from nova.compute import arch from nova import exception from nova import test class ArchTest(test.NoDBTestCase): @mock.patch.object(os, "uname") def test_host(self, mock_uname): os.uname.return_value = ( 'Linux', 'localhost.localdomain', '3.14.8-200.fc20.x86_64', '#1 SMP Mon Jun 16 21:57:53 UTC 2014', 'i686' ) self.assertEqual(arch.I686, arch.from_host()) def test_valid_string(self): self.assertTrue(arch.is_valid("x86_64")) def test_valid_constant(self): self.assertTrue(arch.is_valid(arch.X86_64)) def test_valid_bogus(self): self.assertFalse(arch.is_valid("x86_64wibble")) def test_canonicalize_i386(self): self.assertEqual(arch.I686, arch.canonicalize("i386")) def test_canonicalize_amd64(self): self.assertEqual(arch.X86_64, arch.canonicalize("amd64")) def test_canonicalize_case(self): self.assertEqual(arch.X86_64, arch.canonicalize("X86_64")) def test_canonicalize_compat_xen1(self): self.assertEqual(arch.I686, arch.canonicalize("x86_32")) def test_canonicalize_compat_xen2(self): self.assertEqual(arch.I686, arch.canonicalize("x86_32p")) def test_canonicalize_bogus(self): self.assertRaises(exception.InvalidArchitectureName, arch.canonicalize, "x86_64wibble") nova-13.1.4/nova/tests/unit/compute/test_shelve.py0000664000567000056710000005426713064447152023367 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
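# A short usage sketch of the nova.compute.arch helpers exercised by the
# ArchTest cases further above; the expected values below are taken directly
# from those tests rather than from any new behaviour.
def _arch_usage_sketch():
    from nova.compute import arch
    assert arch.is_valid('x86_64')                      # known name
    assert not arch.is_valid('x86_64wibble')            # unknown name
    assert arch.canonicalize('i386') == arch.I686       # legacy alias
    assert arch.canonicalize('amd64') == arch.X86_64    # legacy alias
    assert arch.canonicalize('X86_64') == arch.X86_64   # case is folded
    # Unknown names raise exception.InvalidArchitectureName
    # (see test_canonicalize_bogus above).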
import mock from mox3 import mox from oslo_config import cfg from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from nova.compute import claims from nova.compute import task_states from nova.compute import vm_states from nova import db from nova import objects from nova.tests.unit.compute import test_compute from nova.tests.unit.image import fake as fake_image CONF = cfg.CONF CONF.import_opt('shelved_offload_time', 'nova.compute.manager') def _fake_resources(): resources = { 'memory_mb': 2048, 'memory_mb_used': 0, 'free_ram_mb': 2048, 'local_gb': 20, 'local_gb_used': 0, 'free_disk_gb': 20, 'vcpus': 2, 'vcpus_used': 0 } return resources class ShelveComputeManagerTestCase(test_compute.BaseTestCase): def _shelve_instance(self, shelved_offload_time, clean_shutdown=True): CONF.set_override('shelved_offload_time', shelved_offload_time) host = 'fake-mini' instance = self._create_fake_instance_obj(params={'host': host}) image_id = 'fake_image_id' host = 'fake-mini' self.useFixture(utils_fixture.TimeFixture()) instance.task_state = task_states.SHELVING instance.save() self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'snapshot') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.start') if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) if CONF.shelved_offload_time == 0: self.compute.network_api.cleanup_instance_network_on_host( self.context, instance, instance.host) self.compute.driver.snapshot(self.context, instance, 'fake_image_id', mox.IgnoreArg()) tracking = {'last_state': instance.vm_state} def check_save(expected_task_state=None): self.assertEqual(123, instance.power_state) if tracking['last_state'] == vm_states.ACTIVE: if CONF.shelved_offload_time == 0: self.assertEqual(task_states.SHELVING_OFFLOADING, instance.task_state) else: self.assertIsNone(instance.task_state) self.assertEqual(vm_states.SHELVED, instance.vm_state) self.assertEqual([task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING], expected_task_state) self.assertIn('shelved_at', instance.system_metadata) self.assertEqual(image_id, instance.system_metadata['shelved_image_id']) self.assertEqual(host, instance.system_metadata['shelved_host']) tracking['last_state'] = instance.vm_state elif (tracking['last_state'] == vm_states.SHELVED and CONF.shelved_offload_time == 0): self.assertIsNone(instance.task_state) self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) self.assertEqual([task_states.SHELVING, task_states.SHELVING_OFFLOADING], expected_task_state) tracking['last_state'] = instance.vm_state elif (tracking['last_state'] == vm_states.SHELVED_OFFLOADED and CONF.shelved_offload_time == 0): self.assertIsNone(instance.host) self.assertIsNone(instance.node) self.assertIsNone(expected_task_state) else: self.fail('Unexpected save!') self.compute._notify_about_instance_usage(self.context, instance, 'shelve.end') if CONF.shelved_offload_time == 0: self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') self.compute.driver.power_off(instance, 0, 0) 
self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() with mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = check_save self.compute.shelve_instance(self.context, instance, image_id=image_id, clean_shutdown=clean_shutdown) def test_shelve(self): self._shelve_instance(-1) def test_shelve_forced_shutdown(self): self._shelve_instance(-1, clean_shutdown=False) def test_shelve_and_offload(self): self._shelve_instance(0) def _shelve_offload(self, clean_shutdown=True): host = 'fake-mini' instance = self._create_fake_instance_obj(params={'host': host}) instance.task_state = task_states.SHELVING instance.save() self.useFixture(utils_fixture.TimeFixture()) self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self.mox.StubOutWithMock(self.compute, '_update_resource_tracker') self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.start') if clean_shutdown: self.compute.driver.power_off(instance, CONF.shutdown_timeout, self.compute.SHUTDOWN_RETRY_INTERVAL) else: self.compute.driver.power_off(instance, 0, 0) self.compute.network_api.cleanup_instance_network_on_host( self.context, instance, instance.host) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._update_resource_tracker(self.context, instance) self.compute._notify_about_instance_usage(self.context, instance, 'shelve_offload.end') self.mox.ReplayAll() with mock.patch.object(instance, 'save'): self.compute.shelve_offload_instance(self.context, instance, clean_shutdown=clean_shutdown) self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) self.assertIsNone(instance.task_state) def test_shelve_offload(self): self._shelve_offload() def test_shelve_offload_forced_shutdown(self): self._shelve_offload(clean_shutdown=False) def test_unshelve(self): instance = self._create_fake_instance_obj() instance.task_state = task_states.UNSHELVING instance.save() image = {'id': 'fake_id'} node = test_compute.NODENAME limits = {} filter_properties = {'limits': limits} host = 'fake-mini' cur_time = timeutils.utcnow() # Adding shelved_* keys in system metadata to verify # whether those are deleted after unshelve call. 
sys_meta = dict(instance.system_metadata) sys_meta['shelved_at'] = cur_time.isoformat() sys_meta['shelved_image_id'] = image['id'] sys_meta['shelved_host'] = host instance.system_metadata = sys_meta self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute, '_prep_block_device') self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.rt, 'instance_claim') self.mox.StubOutWithMock(self.compute.network_api, 'setup_instance_network_on_host') self.deleted_image_id = None def fake_delete(self2, ctxt, image_id): self.deleted_image_id = image_id def fake_claim(context, instance, limits): instance.host = self.compute.host requests = objects.InstancePCIRequests(requests=[]) return claims.Claim(context, instance, self.rt, _fake_resources(), requests) tracking = { 'last_state': instance.task_state, 'spawned': False, } def check_save(expected_task_state=None): if tracking['last_state'] == task_states.UNSHELVING: if tracking['spawned']: self.assertIsNone(instance.task_state) else: self.assertEqual(task_states.SPAWNING, instance.task_state) tracking['spawned'] = True tracking['last_state'] == instance.task_state elif tracking['last_state'] == task_states.SPAWNING: self.assertEqual(vm_states.ACTIVE, instance.vm_state) tracking['last_state'] == instance.task_state else: self.fail('Unexpected save!') fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete) self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.start') self.compute._prep_block_device(self.context, instance, mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm') self.compute.network_api.setup_instance_network_on_host( self.context, instance, self.compute.host) self.compute.driver.spawn(self.context, instance, mox.IsA(objects.ImageMeta), injected_files=[], admin_password=None, network_info=[], block_device_info='fake_bdm') self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.end') self.mox.ReplayAll() with mock.patch.object(self.rt, 'instance_claim', side_effect=fake_claim), \ mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = check_save self.compute.unshelve_instance( self.context, instance, image=image, filter_properties=filter_properties, node=node) self.assertNotIn('shelved_at', instance.system_metadata) self.assertNotIn('shelved_image_id', instance.system_metadata) self.assertNotIn('shelved_host', instance.system_metadata) self.assertEqual(image['id'], self.deleted_image_id) self.assertEqual(instance.host, self.compute.host) self.assertEqual(123, instance.power_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertIsNone(instance.key_data) self.assertEqual(self.compute.host, instance.host) self.assertFalse(instance.auto_disk_config) @mock.patch('nova.utils.get_image_from_system_metadata') def test_unshelve_volume_backed(self, mock_image_meta): instance = self._create_fake_instance_obj() node = test_compute.NODENAME limits = {} filter_properties = {'limits': limits} instance.task_state = task_states.UNSHELVING instance.save() image_meta = {'properties': {'base_image_ref': 'fake_id'}} mock_image_meta.return_value = image_meta self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute, '_prep_block_device') 
self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.rt, 'instance_claim') self.mox.StubOutWithMock(self.compute.network_api, 'setup_instance_network_on_host') tracking = {'last_state': instance.task_state} def check_save(expected_task_state=None): if tracking['last_state'] == task_states.UNSHELVING: self.assertEqual(task_states.SPAWNING, instance.task_state) tracking['last_state'] = instance.task_state elif tracking['last_state'] == task_states.SPAWNING: self.assertEqual(123, instance.power_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertIsNone(instance.key_data) self.assertFalse(instance.auto_disk_config) self.assertIsNone(instance.task_state) tracking['last_state'] = instance.task_state else: self.fail('Unexpected save!') self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.start') self.compute._prep_block_device(self.context, instance, mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm') self.compute.network_api.setup_instance_network_on_host( self.context, instance, self.compute.host) self.rt.instance_claim(self.context, instance, limits).AndReturn( claims.Claim(self.context, instance, self.rt, _fake_resources(), objects.InstancePCIRequests(requests=[]))) self.compute.driver.spawn(self.context, instance, mox.IsA(objects.ImageMeta), injected_files=[], admin_password=None, network_info=[], block_device_info='fake_bdm') self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.end') self.mox.ReplayAll() with mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = check_save self.compute.unshelve_instance(self.context, instance, image=None, filter_properties=filter_properties, node=node) @mock.patch.object(objects.InstanceList, 'get_by_filters') def test_shelved_poll_none_offloaded(self, mock_get_by_filters): # Test instances are not offloaded when shelved_offload_time is -1 self.flags(shelved_offload_time=-1) self.compute._poll_shelved_instances(self.context) self.assertEqual(0, mock_get_by_filters.call_count) @mock.patch('oslo_utils.timeutils.is_older_than') def test_shelved_poll_none_exist(self, mock_older): self.flags(shelved_offload_time=1) mock_older.return_value = False with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: self.compute._poll_shelved_instances(self.context) self.assertFalse(soi.called) @mock.patch('oslo_utils.timeutils.is_older_than') def test_shelved_poll_not_timedout(self, mock_older): mock_older.return_value = False self.flags(shelved_offload_time=1) shelved_time = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time)) time_fixture.advance_time_seconds(CONF.shelved_offload_time - 1) instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED instance.task_state = None instance.host = self.compute.host sys_meta = instance.system_metadata sys_meta['shelved_at'] = shelved_time.isoformat() instance.save() with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: self.compute._poll_shelved_instances(self.context) self.assertFalse(soi.called) self.assertTrue(mock_older.called) def test_shelved_poll_timedout(self): self.flags(shelved_offload_time=1) shelved_time = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time)) 
time_fixture.advance_time_seconds(CONF.shelved_offload_time + 1) instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED instance.task_state = None instance.host = self.compute.host sys_meta = instance.system_metadata sys_meta['shelved_at'] = shelved_time.isoformat() instance.save() data = [] def fake_soi(context, instance, **kwargs): data.append(instance.uuid) with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: soi.side_effect = fake_soi self.compute._poll_shelved_instances(self.context) self.assertTrue(soi.called) self.assertEqual(instance.uuid, data[0]) @mock.patch('oslo_utils.timeutils.is_older_than') @mock.patch('oslo_utils.timeutils.parse_strtime') def test_shelved_poll_filters_task_state(self, mock_parse, mock_older): self.flags(shelved_offload_time=1) mock_older.return_value = True instance1 = self._create_fake_instance_obj() instance1.task_state = task_states.SPAWNING instance1.vm_state = vm_states.SHELVED instance1.host = self.compute.host instance1.system_metadata = {'shelved_at': ''} instance1.save() instance2 = self._create_fake_instance_obj() instance2.task_state = None instance2.vm_state = vm_states.SHELVED instance2.host = self.compute.host instance2.system_metadata = {'shelved_at': ''} instance2.save() data = [] def fake_soi(context, instance, **kwargs): data.append(instance.uuid) with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: soi.side_effect = fake_soi self.compute._poll_shelved_instances(self.context) self.assertTrue(soi.called) self.assertEqual([instance2.uuid], data) @mock.patch('oslo_utils.timeutils.is_older_than') @mock.patch('oslo_utils.timeutils.parse_strtime') def test_shelved_poll_checks_task_state_on_save(self, mock_parse, mock_older): self.flags(shelved_offload_time=1) mock_older.return_value = True instance = self._create_fake_instance_obj() instance.task_state = None instance.vm_state = vm_states.SHELVED instance.host = self.compute.host instance.system_metadata = {'shelved_at': ''} instance.save() def fake_parse_hook(timestring): instance.task_state = task_states.SPAWNING instance.save() mock_parse.side_effect = fake_parse_hook with mock.patch.object(self.compute, 'shelve_offload_instance') as soi: self.compute._poll_shelved_instances(self.context) self.assertFalse(soi.called) class ShelveComputeAPITestCase(test_compute.BaseTestCase): def test_shelve(self): # Ensure instance can be shelved. fake_instance = self._create_fake_instance_obj( {'display_name': 'vm01'}) instance = fake_instance self.assertIsNone(instance['task_state']) def fake_init(self2): # In original _FakeImageService.__init__(), some fake images are # created. To verify the snapshot name of this test only, here # sets a fake method. self2.images = {} def fake_create(self2, ctxt, metadata, data=None): self.assertEqual(metadata['name'], 'vm01-shelved') metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42' return metadata fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init) self.stubs.Set(fake_image._FakeImageService, 'create', fake_create) self.compute_api.shelve(self.context, instance) self.assertEqual(instance.task_state, task_states.SHELVING) db.instance_destroy(self.context, instance['uuid']) @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') def test_unshelve(self, get_by_instance_uuid): # Ensure instance can be unshelved. 
instance = self._create_fake_instance_obj() self.assertIsNone(instance['task_state']) self.compute_api.shelve(self.context, instance) instance.task_state = None instance.vm_state = vm_states.SHELVED instance.save() fake_spec = objects.RequestSpec() get_by_instance_uuid.return_value = fake_spec with mock.patch.object(self.compute_api.compute_task_api, 'unshelve_instance') as unshelve: self.compute_api.unshelve(self.context, instance) get_by_instance_uuid.assert_called_once_with(self.context, instance.uuid) unshelve.assert_called_once_with(self.context, instance, fake_spec) self.assertEqual(instance.task_state, task_states.UNSHELVING) db.instance_destroy(self.context, instance['uuid']) nova-13.1.4/nova/tests/unit/compute/test_vmmode.py0000664000567000056710000000473513064447152023363 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.compute import vm_mode from nova import exception from nova import test from nova.tests.unit import fake_instance class ComputeVMModeTest(test.NoDBTestCase): def _fake_object(self, updates): return fake_instance.fake_instance_obj(None, **updates) def test_case(self): inst = self._fake_object(dict(vm_mode="HVM")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "hvm") def test_legacy_pv(self): inst = self._fake_object(dict(vm_mode="pv")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "xen") def test_legacy_hv(self): inst = self._fake_object(dict(vm_mode="hv")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "hvm") def test_bogus(self): inst = self._fake_object(dict(vm_mode="wibble")) self.assertRaises(exception.Invalid, vm_mode.get_from_instance, inst) def test_good(self): inst = self._fake_object(dict(vm_mode="hvm")) mode = vm_mode.get_from_instance(inst) self.assertEqual(mode, "hvm") def test_name_pv_compat(self): mode = vm_mode.canonicalize('pv') self.assertEqual(vm_mode.XEN, mode) def test_name_hv_compat(self): mode = vm_mode.canonicalize('hv') self.assertEqual(vm_mode.HVM, mode) def test_name_baremetal_compat(self): mode = vm_mode.canonicalize('baremetal') self.assertEqual(vm_mode.HVM, mode) def test_name_hvm(self): mode = vm_mode.canonicalize('hvm') self.assertEqual(vm_mode.HVM, mode) def test_name_none(self): mode = vm_mode.canonicalize(None) self.assertIsNone(mode) def test_name_invalid(self): self.assertRaises(exception.InvalidVirtualMachineMode, vm_mode.canonicalize, 'invalid') nova-13.1.4/nova/tests/unit/compute/test_keypairs.py0000664000567000056710000002456513064447152023726 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for keypair API.""" from oslo_concurrency import processutils from oslo_config import cfg import six from nova.compute import api as compute_api from nova import context from nova import exception from nova.objects import keypair as keypair_obj from nova import quota from nova.tests.unit.compute import test_compute from nova.tests.unit import fake_crypto from nova.tests.unit import fake_notifier from nova.tests.unit.objects import test_keypair CONF = cfg.CONF QUOTAS = quota.QUOTAS class KeypairAPITestCase(test_compute.BaseTestCase): def setUp(self): super(KeypairAPITestCase, self).setUp() self.keypair_api = compute_api.KeypairAPI() self.ctxt = context.RequestContext('fake', 'fake') self._keypair_db_call_stubs() self.existing_key_name = 'fake existing key name' self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf' '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR' 'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/' 'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu' 'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8' 'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK' 'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU' 'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz') self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a' self.keypair_type = keypair_obj.KEYPAIR_TYPE_SSH self.key_destroyed = False def _keypair_db_call_stubs(self): def db_key_pair_get_all_by_user(context, user_id): return [dict(test_keypair.fake_keypair, name=self.existing_key_name, public_key=self.pub_key, fingerprint=self.fingerprint)] def db_key_pair_create(context, keypair): return dict(test_keypair.fake_keypair, **keypair) def db_key_pair_destroy(context, user_id, name): if name == self.existing_key_name: self.key_destroyed = True def db_key_pair_get(context, user_id, name): if name == self.existing_key_name and not self.key_destroyed: return dict(test_keypair.fake_keypair, name=self.existing_key_name, public_key=self.pub_key, fingerprint=self.fingerprint) else: raise exception.KeypairNotFound(user_id=user_id, name=name) self.stub_out("nova.db.key_pair_get_all_by_user", db_key_pair_get_all_by_user) self.stub_out("nova.db.key_pair_create", db_key_pair_create) self.stub_out("nova.db.key_pair_destroy", db_key_pair_destroy) self.stub_out("nova.db.key_pair_get", db_key_pair_get) def _check_notifications(self, action='create', key_name='foo'): self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) n1 = fake_notifier.NOTIFICATIONS[0] self.assertEqual('INFO', n1.priority) self.assertEqual('keypair.%s.start' % action, n1.event_type) self.assertEqual('api.%s' % CONF.host, n1.publisher_id) self.assertEqual('fake', n1.payload['user_id']) self.assertEqual('fake', n1.payload['tenant_id']) self.assertEqual(key_name, n1.payload['key_name']) n2 = fake_notifier.NOTIFICATIONS[1] self.assertEqual('INFO', n2.priority) self.assertEqual('keypair.%s.end' % action, n2.event_type) self.assertEqual('api.%s' % CONF.host, n2.publisher_id) self.assertEqual('fake', n2.payload['user_id']) self.assertEqual('fake', n2.payload['tenant_id']) self.assertEqual(key_name, n2.payload['key_name']) class 
CreateImportSharedTestMixIn(object): """Tests shared between create and import_key. Mix-in pattern is used here so that these `test_*` methods aren't picked up by the test runner unless they are part of a 'concrete' test case. """ def assertKeypairRaises(self, exc_class, expected_message, name): func = getattr(self.keypair_api, self.func_name) args = [] if self.func_name == 'import_key_pair': args.append(self.pub_key) args.append(self.keypair_type) exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id, name, *args) self.assertEqual(expected_message, six.text_type(exc)) def assertInvalidKeypair(self, expected_message, name): msg = 'Keypair data is invalid: %s' % expected_message self.assertKeypairRaises(exception.InvalidKeypair, msg, name) def test_name_too_short(self): msg = ('Keypair name must be string and between 1 ' 'and 255 characters long') self.assertInvalidKeypair(msg, '') def test_name_too_long(self): msg = ('Keypair name must be string and between 1 ' 'and 255 characters long') self.assertInvalidKeypair(msg, 'x' * 256) def test_invalid_chars(self): msg = "Keypair name contains unsafe characters" self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *') def test_already_exists(self): def db_key_pair_create_duplicate(context, keypair): raise exception.KeyPairExists(key_name=keypair.get('name', '')) self.stub_out("nova.db.key_pair_create", db_key_pair_create_duplicate) msg = ("Key pair '%(key_name)s' already exists." % {'key_name': self.existing_key_name}) self.assertKeypairRaises(exception.KeyPairExists, msg, self.existing_key_name) def test_quota_limit(self): def fake_quotas_count(self, context, resource, *args, **kwargs): return CONF.quota_key_pairs self.stubs.Set(QUOTAS, "count", fake_quotas_count) msg = "Maximum number of key pairs exceeded" self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo') class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn): func_name = 'create_key_pair' def _check_success(self): keypair, private_key = self.keypair_api.create_key_pair( self.ctxt, self.ctxt.user_id, 'foo', key_type=self.keypair_type) self.assertEqual('foo', keypair['name']) self.assertEqual(self.keypair_type, keypair['type']) self._check_notifications() def test_success_ssh(self): self._check_success() def test_success_x509(self): self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509 self._check_success() def test_x509_subject_too_long(self): # X509 keypairs will fail if the Subject they're created with # is longer than 64 characters. The previous unit tests could not # detect the issue because the ctxt.user_id was too short. # This unit tests is added to prove this issue. 
self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509 self.ctxt.user_id = 'a' * 65 self.assertRaises(processutils.ProcessExecutionError, self._check_success) class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn): func_name = 'import_key_pair' def _check_success(self): keypair = self.keypair_api.import_key_pair(self.ctxt, self.ctxt.user_id, 'foo', self.pub_key, self.keypair_type) self.assertEqual('foo', keypair['name']) self.assertEqual(self.keypair_type, keypair['type']) self.assertEqual(self.fingerprint, keypair['fingerprint']) self.assertEqual(self.pub_key, keypair['public_key']) self.assertEqual(self.keypair_type, keypair['type']) self._check_notifications(action='import') def test_success_ssh(self): self._check_success() def test_success_x509(self): self.keypair_type = keypair_obj.KEYPAIR_TYPE_X509 certif, fingerprint = fake_crypto.get_x509_cert_and_fingerprint() self.pub_key = certif self.fingerprint = fingerprint self._check_success() def test_bad_key_data(self): exc = self.assertRaises(exception.InvalidKeypair, self.keypair_api.import_key_pair, self.ctxt, self.ctxt.user_id, 'foo', 'bad key data') msg = u'Keypair data is invalid: failed to generate fingerprint' self.assertEqual(msg, six.text_type(exc)) class GetKeypairTestCase(KeypairAPITestCase): def test_success(self): keypair = self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id, self.existing_key_name) self.assertEqual(self.existing_key_name, keypair['name']) class GetKeypairsTestCase(KeypairAPITestCase): def test_success(self): keypairs = self.keypair_api.get_key_pairs(self.ctxt, self.ctxt.user_id) self.assertEqual([self.existing_key_name], [k['name'] for k in keypairs]) class DeleteKeypairTestCase(KeypairAPITestCase): def test_success(self): self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id, self.existing_key_name) self.keypair_api.delete_key_pair(self.ctxt, self.ctxt.user_id, self.existing_key_name) self.assertRaises(exception.KeypairNotFound, self.keypair_api.get_key_pair, self.ctxt, self.ctxt.user_id, self.existing_key_name) self._check_notifications(action='delete', key_name=self.existing_key_name) nova-13.1.4/nova/tests/unit/compute/test_tracker.py0000664000567000056710000022544013064447152023525 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from oslo_utils import units from nova.compute import arch from nova.compute import claims from nova.compute import hv_type from nova.compute import power_state from nova.compute import resource_tracker from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states from nova import exception as exc from nova import objects from nova.objects import base as obj_base from nova.pci import manager as pci_manager from nova import test _VIRT_DRIVER_AVAIL_RESOURCES = { 'vcpus': 4, 'memory_mb': 512, 'local_gb': 6, 'vcpus_used': 0, 'memory_mb_used': 0, 'local_gb_used': 0, 'hypervisor_type': 'fake', 'hypervisor_version': 0, 'hypervisor_hostname': 'fakehost', 'cpu_info': '', 'numa_topology': None, } _COMPUTE_NODE_FIXTURES = [ objects.ComputeNode( id=1, host='fake-host', vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'], memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'], local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'], vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'], memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'], local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'], hypervisor_type='fake', hypervisor_version=0, hypervisor_hostname='fake-host', free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] - _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']), free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] - _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']), current_workload=0, running_vms=0, cpu_info='{}', disk_available_least=0, host_ip='1.1.1.1', supported_hv_specs=[ objects.HVSpec.from_list([arch.I686, hv_type.KVM, vm_mode.HVM]) ], metrics=None, pci_device_pools=None, extra_resources=None, stats={}, numa_topology=None, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0, ), ] _INSTANCE_TYPE_FIXTURES = { 1: { 'id': 1, 'flavorid': 'fakeid-1', 'name': 'fake1.small', 'memory_mb': 128, 'vcpus': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'swap': 0, 'rxtx_factor': 0, 'vcpu_weight': 1, 'extra_specs': {}, }, 2: { 'id': 2, 'flavorid': 'fakeid-2', 'name': 'fake1.medium', 'memory_mb': 256, 'vcpus': 2, 'root_gb': 5, 'ephemeral_gb': 0, 'swap': 0, 'rxtx_factor': 0, 'vcpu_weight': 1, 'extra_specs': {}, }, } _INSTANCE_TYPE_OBJ_FIXTURES = { 1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small', memory_mb=128, vcpus=1, root_gb=1, ephemeral_gb=0, swap=0, rxtx_factor=0, vcpu_weight=1, extra_specs={}), 2: objects.Flavor(id=2, flavorid='fakeid-2', name='fake1.medium', memory_mb=256, vcpus=2, root_gb=5, ephemeral_gb=0, swap=0, rxtx_factor=0, vcpu_weight=1, extra_specs={}), } _2MB = 2 * units.Mi / units.Ki _INSTANCE_NUMA_TOPOLOGIES = { '2mb': objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1]), memory=_2MB, pagesize=0), objects.InstanceNUMACell( id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]), } _NUMA_LIMIT_TOPOLOGIES = { '2mb': objects.NUMATopologyLimits(id=0, cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0), } _NUMA_PAGE_TOPOLOGIES = { '2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0) } _NUMA_HOST_TOPOLOGIES = { '2mb': objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([1, 2]), memory=_2MB, cpu_usage=0, memory_usage=0, mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([3, 4]), memory=_2MB, cpu_usage=0, memory_usage=0, mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']], siblings=[], pinned_cpus=set([]))]), } _INSTANCE_FIXTURES = [ objects.Instance( id=1, host=None, # prevent RT trying to 
lazy-load this node=None, uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124', memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'], vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'], root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'], ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'], numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'], instance_type_id=1, vm_state=vm_states.ACTIVE, power_state=power_state.RUNNING, task_state=None, os_type='fake-os', # Used by the stats collector. project_id='fake-project', # Used by the stats collector. flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1], old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1], new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1], ), objects.Instance( id=2, host=None, node=None, uuid='33805b54-dea6-47b8-acb2-22aeb1b57919', memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'], vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'], root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'], ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'], numa_topology=None, instance_type_id=2, vm_state=vm_states.DELETED, power_state=power_state.SHUTDOWN, task_state=None, os_type='fake-os', project_id='fake-project-2', flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2], old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2], new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2], ), ] _MIGRATION_FIXTURES = { # A migration that has only this compute node as the source host 'source-only': objects.Migration( id=1, instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08', source_compute='fake-host', dest_compute='other-host', source_node='fake-node', dest_node='other-node', old_instance_type_id=1, new_instance_type_id=2, migration_type='resize', status='migrating' ), # A migration that has only this compute node as the dest host 'dest-only': objects.Migration( id=2, instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765', source_compute='other-host', dest_compute='fake-host', source_node='other-node', dest_node='fake-node', old_instance_type_id=1, new_instance_type_id=2, migration_type='resize', status='migrating' ), # A migration that has this compute node as both the source and dest host 'source-and-dest': objects.Migration( id=3, instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997', source_compute='fake-host', dest_compute='fake-host', source_node='fake-node', dest_node='fake-node', old_instance_type_id=1, new_instance_type_id=2, migration_type='resize', status='migrating' ), # A migration that has this compute node as destination and is an evac 'dest-only-evac': objects.Migration( id=4, instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc', source_compute='other-host', dest_compute='fake-host', source_node='other-node', dest_node='fake-node', old_instance_type_id=2, new_instance_type_id=None, migration_type='evacuation', status='pre-migrating' ), } _MIGRATION_INSTANCE_FIXTURES = { # source-only 'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance( id=101, host=None, # prevent RT trying to lazy-load this node=None, uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08', memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'], vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'], root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'], ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'], numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'], instance_type_id=1, vm_state=vm_states.ACTIVE, power_state=power_state.RUNNING, task_state=task_states.RESIZE_MIGRATING, system_metadata={}, os_type='fake-os', project_id='fake-project', flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1], old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1], new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2], ), # dest-only 
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance( id=102, host=None, # prevent RT trying to lazy-load this node=None, uuid='f6ed631a-8645-4b12-8e1e-2fff55795765', memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'], vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'], root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'], ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'], numa_topology=None, instance_type_id=2, vm_state=vm_states.ACTIVE, power_state=power_state.RUNNING, task_state=task_states.RESIZE_MIGRATING, system_metadata={}, os_type='fake-os', project_id='fake-project', flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2], old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1], new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2], ), # source-and-dest 'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance( id=3, host=None, # prevent RT trying to lazy-load this node=None, uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997', memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'], vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'], root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'], ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'], numa_topology=None, instance_type_id=2, vm_state=vm_states.ACTIVE, power_state=power_state.RUNNING, task_state=task_states.RESIZE_MIGRATING, system_metadata={}, os_type='fake-os', project_id='fake-project', flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2], old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1], new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2], ), # dest-only-evac '077fb63a-bdc8-4330-90ef-f012082703dc': objects.Instance( id=102, host=None, # prevent RT trying to lazy-load this node=None, uuid='077fb63a-bdc8-4330-90ef-f012082703dc', memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'], vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'], root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'], ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'], numa_topology=None, instance_type_id=2, vm_state=vm_states.ACTIVE, power_state=power_state.RUNNING, task_state=task_states.REBUILDING, system_metadata={}, os_type='fake-os', project_id='fake-project', flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2], old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1], new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2], ), } _MIGRATION_CONTEXT_FIXTURES = { 'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.MigrationContext( instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997', migration_id=3, new_numa_topology=None, old_numa_topology=None), 'c17741a5-6f3d-44a8-ade8-773dc8c29124': objects.MigrationContext( instance_uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124', migration_id=3, new_numa_topology=None, old_numa_topology=None), 'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.MigrationContext( instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08', migration_id=1, new_numa_topology=None, old_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb']), 'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.MigrationContext( instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765', migration_id=2, new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'], old_numa_topology=None), '077fb63a-bdc8-4330-90ef-f012082703dc': objects.MigrationContext( instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc', migration_id=2, new_numa_topology=None, old_numa_topology=None), } def overhead_zero(instance): # Emulate that the driver does not adjust the memory # of the instance... return { 'memory_mb': 0 } def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, estimate_overhead=overhead_zero): """Sets up the resource tracker instance with mock fixtures. 
:param virt_resources: Optional override of the resource representation returned by the virt driver's `get_available_resource()` method. :param estimate_overhead: Optional override of a function that should return overhead of memory given an instance object. Defaults to returning zero overhead. """ sched_client_mock = mock.MagicMock() notifier_mock = mock.MagicMock() vd = mock.MagicMock() # Make sure we don't change any global fixtures during tests virt_resources = copy.deepcopy(virt_resources) vd.get_available_resource.return_value = virt_resources vd.estimate_instance_overhead.side_effect = estimate_overhead with test.nested( mock.patch('nova.scheduler.client.SchedulerClient', return_value=sched_client_mock), mock.patch('nova.rpc.get_notifier', return_value=notifier_mock)): rt = resource_tracker.ResourceTracker(hostname, vd, nodename) return (rt, sched_client_mock, vd) class BaseTestCase(test.NoDBTestCase): def setUp(self): super(BaseTestCase, self).setUp() self.rt = None self.flags(my_ip='1.1.1.1') def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, estimate_overhead=overhead_zero): (self.rt, self.sched_client_mock, self.driver_mock) = setup_rt( 'fake-host', 'fake-node', virt_resources, estimate_overhead) class TestUpdateAvailableResources(BaseTestCase): def _update_available_resources(self): # We test RT._update separately, since the complexity # of the update_available_resource() function is high enough as # it is, we just want to focus here on testing the resources # parameter that update_available_resource() eventually passes # to _update(). with mock.patch.object(self.rt, '_update') as update_mock: self.rt.update_available_resource(mock.sentinel.ctx) return update_mock @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock, get_cn_mock): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self._setup_rt() get_mock.return_value = [] migr_mock.return_value = [] get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] update_mock = self._update_available_resources() vd = self.driver_mock vd.get_available_resource.assert_called_once_with('fake-node') get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node', expected_attrs=[ 'system_metadata', 'numa_topology', 'flavor', 'migration_context']) get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 6, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 512, 'memory_mb_used': 0, 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 0, 'hypervisor_type': 'fake', 'local_gb_used': 0, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') 
@mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_no_instances_no_migrations_reserved_disk_and_ram( self, get_mock, migr_mock, get_cn_mock): self.flags(reserved_host_disk_mb=1024, reserved_host_memory_mb=512) self._setup_rt() get_mock.return_value = [] migr_mock.return_value = [] get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 5, # 6GB avail - 1 GB reserved 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 0, # 512MB avail - 512MB reserved 'memory_mb_used': 512, # 0MB used + 512MB reserved 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 0, 'hypervisor_type': 'fake', 'local_gb_used': 1, # 0GB used + 1 GB reserved 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_some_instances_no_migrations(self, get_mock, migr_mock, get_cn_mock): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(vcpus_used=1, memory_mb_used=128, local_gb_used=1) self._setup_rt(virt_resources=virt_resources) get_mock.return_value = _INSTANCE_FIXTURES migr_mock.return_value = [] get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 5, # 6 - 1 used 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 384, # 512 - 128 used 'memory_mb_used': 128, 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 1, 'hypervisor_type': 'fake', 'local_gb_used': 1, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 1 # One active instance }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_orphaned_instances_no_migrations(self, get_mock, migr_mock, get_cn_mock): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(memory_mb_used=64) 
self._setup_rt(virt_resources=virt_resources) get_mock.return_value = [] migr_mock.return_value = [] get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] # Orphaned instances are those that the virt driver has on # record as consuming resources on the compute node, but the # Nova database has no record of the instance being active # on the host. For some reason, the resource tracker only # considers orphaned instance's memory usage in its calculations # of free resources... orphaned_usages = { '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': { # Yes, the return result format of get_per_instance_usage # is indeed this stupid and redundant. Also note that the # libvirt driver just returns an empty dict always for this # method and so who the heck knows whether this stuff # actually works. 'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d', 'memory_mb': 64 } } vd = self.driver_mock vd.get_per_instance_usage.return_value = orphaned_usages update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 6, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 448, # 512 - 64 orphaned usage 'memory_mb_used': 64, 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 0, 'hypervisor_type': 'fake', 'local_gb_used': 0, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, # Yep, for some reason, orphaned instances are not counted # as running VMs... 'running_vms': 0 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.Instance.get_by_uuid') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_no_instances_source_migration(self, get_mock, get_inst_mock, migr_mock, get_cn_mock): # We test the behavior of update_available_resource() when # there is an active migration that involves this compute node # as the source host not the destination host, and the resource # tracker does not have any instances assigned to it. This is # the case when a migration from this compute host to another # has been completed, but the user has not confirmed the resize # yet, so the resource tracker must continue to keep the resources # for the original instance type available on the source compute # node in case of a revert of the resize. 
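        # The numbers asserted below follow from the fixtures at the top of
        # this module: the migration's old flavor is fake1.small (128 MB RAM,
        # 1 vcpu, 1 GB disk), which the source node keeps reserved for a
        # possible revert, so:
        #   free_ram_mb  = 512 - 128 = 384
        #   free_disk_gb =   6 - 1   = 5
        #   vcpus_used = 1, memory_mb_used = 128, local_gb_used = 1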
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(vcpus_used=4, memory_mb_used=128, local_gb_used=1) self._setup_rt(virt_resources=virt_resources) get_mock.return_value = [] migr_obj = _MIGRATION_FIXTURES['source-only'] migr_mock.return_value = [migr_obj] get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] # Migration.instance property is accessed in the migration # processing code, and this property calls # objects.Instance.get_by_uuid, so we have the migration return inst_uuid = migr_obj.instance_uuid instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone() get_inst_mock.return_value = instance instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 5, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 384, # 512 total - 128 for possible revert of orig 'memory_mb_used': 128, # 128 possible revert amount 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 1, 'hypervisor_type': 'fake', 'local_gb_used': 1, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.Instance.get_by_uuid') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_no_instances_dest_migration(self, get_mock, get_inst_mock, migr_mock, get_cn_mock): # We test the behavior of update_available_resource() when # there is an active migration that involves this compute node # as the destination host not the source host, and the resource # tracker does not yet have any instances assigned to it. This is # the case when a migration to this compute host from another host # is in progress, but the user has not confirmed the resize # yet, so the resource tracker must reserve the resources # for the possibly-to-be-confirmed instance's instance type # node in case of a confirm of the resize. 
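        # Expected accounting, per the fixtures above: the migration's new
        # flavor is fake1.medium (256 MB RAM, 2 vcpus, 5 GB disk), which the
        # destination node must hold until the resize is confirmed, so:
        #   free_ram_mb  = 512 - 256 = 256
        #   free_disk_gb =   6 - 5   = 1
        #   vcpus_used = 2, memory_mb_used = 256, local_gb_used = 5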
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(vcpus_used=2, memory_mb_used=256, local_gb_used=5) self._setup_rt(virt_resources=virt_resources) get_mock.return_value = [] migr_obj = _MIGRATION_FIXTURES['dest-only'] migr_mock.return_value = [migr_obj] inst_uuid = migr_obj.instance_uuid instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone() get_inst_mock.return_value = instance get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 1, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 256, # 512 total - 256 for possible confirm of new 'memory_mb_used': 256, # 256 possible confirmed amount 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 2, 'hypervisor_type': 'fake', 'local_gb_used': 5, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.Instance.get_by_uuid') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_no_instances_dest_evacuation(self, get_mock, get_inst_mock, migr_mock, get_cn_mock): # We test the behavior of update_available_resource() when # there is an active evacuation that involves this compute node # as the destination host not the source host, and the resource # tracker does not yet have any instances assigned to it. This is # the case when a migration to this compute host from another host # is in progress, but not finished yet. 
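        # An evacuation has no old/new flavor split; the evacuated instance
        # keeps its fake1.medium flavor (256 MB RAM, 2 vcpus, 5 GB disk), so
        # the destination accounting matches the dest-only resize case above.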
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(vcpus_used=2, memory_mb_used=256, local_gb_used=5) self._setup_rt(virt_resources=virt_resources) get_mock.return_value = [] migr_obj = _MIGRATION_FIXTURES['dest-only-evac'] migr_mock.return_value = [migr_obj] inst_uuid = migr_obj.instance_uuid instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone() get_inst_mock.return_value = instance get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 1, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 256, # 512 total - 256 for possible confirm of new 'memory_mb_used': 256, # 256 possible confirmed amount 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 2, 'hypervisor_type': 'fake', 'local_gb_used': 5, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid', return_value=None) @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.Instance.get_by_uuid') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_some_instances_source_and_dest_migration(self, get_mock, get_inst_mock, migr_mock, get_cn_mock, get_mig_ctxt_mock): # We test the behavior of update_available_resource() when # there is an active migration that involves this compute node # as the destination host AND the source host, and the resource # tracker has a few instances assigned to it, including the # instance that is resizing to this same compute node. The tracking # of resource amounts takes into account both the old and new # resize instance types as taking up space on the node. 
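        # Per the fixtures above, three footprints land on this node: the
        # unrelated active instance (fake1.small) plus both the old
        # (fake1.small) and new (fake1.medium) flavors of the resizing
        # instance, i.e. vcpus_used = 1 + 1 + 2 = 4, running_vms = 2, and
        # current_workload = 1 for the in-progress resize.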
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) # Setup virt resources to match used resources to number # of defined instances on the hypervisor virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) virt_resources.update(vcpus_used=4, memory_mb_used=512, local_gb_used=7) self._setup_rt(virt_resources=virt_resources) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] migr_mock.return_value = [migr_obj] inst_uuid = migr_obj.instance_uuid # The resizing instance has already had its instance type # changed to the *new* instance type (the bigger one, instance type 2) resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone() resizing_instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[resizing_instance.uuid]) all_instances = _INSTANCE_FIXTURES + [resizing_instance] get_mock.return_value = all_instances get_inst_mock.return_value = resizing_instance get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] update_mock = self._update_available_resources() get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_resources.update({ # host is added in update_available_resources() # before calling _update() 'host': 'fake-host', 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', # 6 total - 1G existing - 5G new flav - 1G old flav 'free_disk_gb': -1, 'hypervisor_version': 0, 'local_gb': 6, # 512 total - 128 existing - 256 new flav - 128 old flav 'free_ram_mb': 0, 'memory_mb_used': 512, # 128 exist + 256 new flav + 128 old flav 'pci_device_pools': objects.PciDevicePoolList(), 'vcpus_used': 4, 'hypervisor_type': 'fake', 'local_gb_used': 7, # 1G existing, 5G new flav + 1 old flav 'memory_mb': 512, 'current_workload': 1, # One migrating instance... 
'vcpus': 4, 'running_vms': 2 }) update_mock.assert_called_once_with(mock.sentinel.ctx) self.assertTrue(obj_base.obj_equal_prims(expected_resources, self.rt.compute_node)) class TestInitComputeNode(BaseTestCase): @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.Service.get_by_compute_host') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') def test_no_op_init_compute_node(self, get_mock, service_mock, create_mock): self._setup_rt() resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.rt.compute_node = compute_node self.rt._init_compute_node(mock.sentinel.ctx, resources) self.assertFalse(service_mock.called) self.assertFalse(get_mock.called) self.assertFalse(create_mock.called) self.assertFalse(self.rt.disabled) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') def test_compute_node_loaded(self, get_mock, create_mock): self._setup_rt() def fake_get_node(_ctx, host, node): res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) return res get_mock.side_effect = fake_get_node resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) self.rt._init_compute_node(mock.sentinel.ctx, resources) get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') self.assertFalse(create_mock.called) self.assertFalse(self.rt.disabled) @mock.patch('nova.objects.ComputeNode.create') @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') def test_compute_node_created_on_empty(self, get_mock, create_mock): self._setup_rt() get_mock.side_effect = exc.NotFound cpu_alloc_ratio = 1.0 ram_alloc_ratio = 1.0 disk_alloc_ratio = 1.0 resources = { 'host_ip': '1.1.1.1', 'numa_topology': None, 'metrics': '[]', 'cpu_info': '', 'hypervisor_hostname': 'fakehost', 'free_disk_gb': 6, 'hypervisor_version': 0, 'local_gb': 6, 'free_ram_mb': 512, 'memory_mb_used': 0, 'pci_device_pools': [], 'vcpus_used': 0, 'hypervisor_type': 'fake', 'local_gb_used': 0, 'memory_mb': 512, 'current_workload': 0, 'vcpus': 4, 'running_vms': 0, 'pci_passthrough_devices': '[]' } # The expected compute represents the initial values used # when creating a compute node. 
expected_compute = objects.ComputeNode( host_ip=resources['host_ip'], vcpus=resources['vcpus'], memory_mb=resources['memory_mb'], local_gb=resources['local_gb'], cpu_info=resources['cpu_info'], vcpus_used=resources['vcpus_used'], memory_mb_used=resources['memory_mb_used'], local_gb_used=resources['local_gb_used'], numa_topology=resources['numa_topology'], hypervisor_type=resources['hypervisor_type'], hypervisor_version=resources['hypervisor_version'], hypervisor_hostname=resources['hypervisor_hostname'], # NOTE(sbauza): ResourceTracker adds host field host='fake-host', # NOTE(sbauza): ResourceTracker adds CONF allocation ratios ram_allocation_ratio=ram_alloc_ratio, cpu_allocation_ratio=cpu_alloc_ratio, disk_allocation_ratio=disk_alloc_ratio, ) # Forcing the flags to the values we know self.rt.ram_allocation_ratio = ram_alloc_ratio self.rt.cpu_allocation_ratio = cpu_alloc_ratio self.rt.disk_allocation_ratio = disk_alloc_ratio self.rt._init_compute_node(mock.sentinel.ctx, resources) self.assertFalse(self.rt.disabled) get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host', 'fake-node') create_mock.assert_called_once_with() self.assertTrue(obj_base.obj_equal_prims(expected_compute, self.rt.compute_node)) def test_copy_resources_adds_allocation_ratios(self): self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0, disk_allocation_ratio=2.0) self._setup_rt() resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES) compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.rt.compute_node = compute_node self.rt._copy_resources(resources) self.assertEqual(4.0, self.rt.compute_node.cpu_allocation_ratio) self.assertEqual(3.0, self.rt.compute_node.ram_allocation_ratio) self.assertEqual(2.0, self.rt.compute_node.disk_allocation_ratio) class TestUpdateComputeNode(BaseTestCase): @mock.patch('nova.objects.Service.get_by_compute_host') def test_existing_compute_node_updated_same_resources(self, service_mock): self._setup_rt() # This is the same set of resources as the fixture, deliberately. We # are checking below to see that update_resource_stats() is not # needlessly called when the resources don't actually change. compute = objects.ComputeNode( host_ip='1.1.1.1', numa_topology=None, metrics='[]', cpu_info='', hypervisor_hostname='fakehost', free_disk_gb=6, hypervisor_version=0, local_gb=6, free_ram_mb=512, memory_mb_used=0, pci_device_pools=objects.PciDevicePoolList(), vcpus_used=0, hypervisor_type='fake', local_gb_used=0, memory_mb=512, current_workload=0, vcpus=4, running_vms=0, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0, ) self.rt.compute_node = compute self.rt._update(mock.sentinel.ctx) self.assertFalse(self.rt.disabled) self.assertFalse(service_mock.called) # The above call to _update() will populate the # RT.old_resources collection with the resources. Here, we check that # if we call _update() again with the same resources, that # the scheduler client won't be called again to update those # (unchanged) resources for the compute node self.sched_client_mock.reset_mock() urs_mock = self.sched_client_mock.update_resource_stats self.rt._update(mock.sentinel.ctx) self.assertFalse(urs_mock.called) @mock.patch('nova.objects.Service.get_by_compute_host') def test_existing_compute_node_updated_new_resources(self, service_mock): self._setup_rt() # Deliberately changing local_gb_used, vcpus_used, and memory_mb_used # below to be different from the compute node fixture's base usages. 
# We want to check that the code paths update the stored compute node # usage records with what is supplied to _update(). compute = objects.ComputeNode( host='fake-host', host_ip='1.1.1.1', numa_topology=None, metrics='[]', cpu_info='', hypervisor_hostname='fakehost', free_disk_gb=2, hypervisor_version=0, local_gb=6, free_ram_mb=384, memory_mb_used=128, pci_device_pools=objects.PciDevicePoolList(), vcpus_used=2, hypervisor_type='fake', local_gb_used=4, memory_mb=512, current_workload=0, vcpus=4, running_vms=0, cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, disk_allocation_ratio=1.0, ) self.rt.compute_node = compute self.rt._update(mock.sentinel.ctx) self.assertFalse(self.rt.disabled) self.assertFalse(service_mock.called) urs_mock = self.sched_client_mock.update_resource_stats urs_mock.assert_called_once_with(self.rt.compute_node) class TestInstanceClaim(BaseTestCase): def setUp(self): super(TestInstanceClaim, self).setUp() self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self._setup_rt() self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) # not using mock.sentinel.ctx because instance_claim calls #elevated self.ctx = mock.MagicMock() self.elevated = mock.MagicMock() self.ctx.elevated.return_value = self.elevated self.instance = _INSTANCE_FIXTURES[0].obj_clone() def assertEqualNUMAHostTopology(self, expected, got): attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage') if None in (expected, got): if expected != got: raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) else: return if len(expected) != len(got): raise AssertionError("Topologies don't match due to different " "number of cells. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) for exp_cell, got_cell in zip(expected.cells, got.cells): for attr in attrs: if getattr(exp_cell, attr) != getattr(got_cell, attr): raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) def test_claim_disabled(self): self.rt.compute_node = None self.assertTrue(self.rt.disabled) with mock.patch.object(self.instance, 'save'): claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance, None) self.assertEqual(self.rt.host, self.instance.host) self.assertEqual(self.rt.host, self.instance.launched_on) self.assertEqual(self.rt.nodename, self.instance.node) self.assertIsInstance(claim, claims.NopClaim) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_update_usage_with_claim(self, migr_mock, pci_mock): # Test that RT.update_usage() only changes the compute node # resources if there has been a claim first. 
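        # The first update_usage() call below happens before any claim, so the
        # compute node must be left untouched; only after instance_claim()
        # should the instance's RAM, disk and vcpus appear in the usage fields.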
pci_mock.return_value = objects.InstancePCIRequests(requests=[]) expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.rt.update_usage(self.ctx, self.instance) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': objects.PciDevicePoolList(), }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_update_usage_removed(self, migr_mock, pci_mock): # Test that RT.update_usage() removes the instance when update is # called in a removed state pci_mock.return_value = objects.InstancePCIRequests(requests=[]) expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': objects.PciDevicePoolList(), }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected_updated['pci_device_pools'] = objects.PciDevicePoolList() self.instance.vm_state = vm_states.SHELVED_OFFLOADED with mock.patch.object(self.rt, '_update') as update_mock: self.rt.update_usage(self.ctx, self.instance) self.assertTrue(obj_base.obj_equal_prims(expected_updated, self.rt.compute_node)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim(self, migr_mock, pci_mock): self.assertFalse(self.rt.disabled) pci_mock.return_value = objects.InstancePCIRequests(requests=[]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': objects.PciDevicePoolList(), }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) @mock.patch('nova.pci.stats.PciDeviceStats.support_requests', return_value=True) @mock.patch('nova.pci.manager.PciDevTracker.claim_instance') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') 
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_with_pci(self, migr_mock, pci_mock, pci_manager_mock, pci_stats_mock): # Test that a claim involving PCI requests correctly claims # PCI devices on the host and sends an updated pci_device_pools # attribute of the ComputeNode object. self.assertFalse(self.rt.disabled) # TODO(jaypipes): Remove once the PCI tracker is always created # upon the resource tracker being initialized... self.rt.pci_tracker = pci_manager.PciDevTracker(mock.sentinel.ctx) pci_pools = objects.PciDevicePoolList() pci_manager_mock.return_value = pci_pools request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) pci_mock.return_value = objects.InstancePCIRequests(requests=[request]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) expected.update({ 'local_gb_used': disk_used, 'memory_mb_used': self.instance.memory_mb, 'free_disk_gb': expected['local_gb'] - disk_used, "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb, 'running_vms': 1, 'vcpus_used': 1, 'pci_device_pools': pci_pools }) with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) update_mock.assert_called_once_with(self.elevated) pci_manager_mock.assert_called_once_with(mock.ANY, # context... pci_mock.return_value, None) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_abort_context_manager(self, migr_mock, pci_mock): pci_mock.return_value = objects.InstancePCIRequests(requests=[]) self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) self.assertEqual(0, self.rt.compute_node.running_vms) mock_save = mock.MagicMock() mock_clear_numa = mock.MagicMock() @mock.patch.object(self.instance, 'save', mock_save) @mock.patch.object(self.instance, 'clear_numa_topology', mock_clear_numa) @mock.patch.object(objects.Instance, 'obj_clone', return_value=self.instance) def _doit(mock_clone): with self.rt.instance_claim(self.ctx, self.instance, None): # Raise an exception. Just make sure below that the abort() # method of the claim object was called (and the resulting # resources reset to the pre-claimed amounts) raise test.TestingException() self.assertRaises(test.TestingException, _doit) self.assertEqual(2, mock_save.call_count) mock_clear_numa.assert_called_once_with() self.assertIsNone(self.instance.host) self.assertIsNone(self.instance.node) # Assert that the resources claimed by the Claim() constructor # are returned to the resource tracker due to the claim's abort() # method being called when triggered by the exception raised above. 
self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) self.assertEqual(0, self.rt.compute_node.running_vms) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_abort(self, migr_mock, pci_mock): pci_mock.return_value = objects.InstancePCIRequests(requests=[]) disk_used = self.instance.root_gb + self.instance.ephemeral_gb @mock.patch.object(objects.Instance, 'obj_clone', return_value=self.instance) @mock.patch.object(self.instance, 'save') def _claim(mock_save, mock_clone): return self.rt.instance_claim(self.ctx, self.instance, None) claim = _claim() self.assertEqual(disk_used, self.rt.compute_node.local_gb_used) self.assertEqual(self.instance.memory_mb, self.rt.compute_node.memory_mb_used) self.assertEqual(1, self.rt.compute_node.running_vms) mock_save = mock.MagicMock() mock_clear_numa = mock.MagicMock() @mock.patch.object(self.instance, 'save', mock_save) @mock.patch.object(self.instance, 'clear_numa_topology', mock_clear_numa) def _abort(): claim.abort() _abort() mock_save.assert_called_once_with() mock_clear_numa.assert_called_once_with() self.assertIsNone(self.instance.host) self.assertIsNone(self.instance.node) self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) self.assertEqual(0, self.rt.compute_node.running_vms) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_limits(self, migr_mock, pci_mock): self.assertFalse(self.rt.disabled) pci_mock.return_value = objects.InstancePCIRequests(requests=[]) good_limits = { 'memory_mb': _COMPUTE_NODE_FIXTURES[0]['memory_mb'], 'disk_gb': _COMPUTE_NODE_FIXTURES[0]['local_gb'], 'vcpu': _COMPUTE_NODE_FIXTURES[0]['vcpus'], } for key in good_limits.keys(): bad_limits = copy.deepcopy(good_limits) bad_limits[key] = 0 self.assertRaises(exc.ComputeResourcesUnavailable, self.rt.instance_claim, self.ctx, self.instance, bad_limits) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_claim_numa(self, migr_mock, pci_mock): self.assertFalse(self.rt.disabled) pci_mock.return_value = objects.InstancePCIRequests(requests=[]) self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb'] host_topology = _NUMA_HOST_TOPOLOGIES['2mb'] self.rt.compute_node['numa_topology'] = host_topology._to_json() limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']} expected_numa = copy.deepcopy(host_topology) for cell in expected_numa.cells: cell.memory_usage += _2MB cell.cpu_usage += 1 with mock.patch.object(self.rt, '_update') as update_mock: with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, limits) update_mock.assert_called_once_with(self.ctx.elevated()) updated_compute_node = self.rt.compute_node new_numa = updated_compute_node['numa_topology'] new_numa = objects.NUMATopology.obj_from_db_obj(new_numa) self.assertEqualNUMAHostTopology(expected_numa, new_numa) @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.Instance.get_by_uuid') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') class TestMoveClaim(BaseTestCase): def setUp(self): 
super(TestMoveClaim, self).setUp() self._setup_rt() self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) self.instance = _INSTANCE_FIXTURES[0].obj_clone() self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1] self.limits = {} # not using mock.sentinel.ctx because resize_claim calls #elevated self.ctx = mock.MagicMock() self.elevated = mock.MagicMock() self.ctx.elevated.return_value = self.elevated # Initialise extensible resource trackers self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) with test.nested( mock.patch('nova.objects.InstanceList.get_by_host_and_node'), mock.patch('nova.objects.MigrationList.' 'get_in_progress_by_host_and_node') ) as (inst_list_mock, migr_mock): inst_list_mock.return_value = objects.InstanceList(objects=[]) migr_mock.return_value = objects.MigrationList(objects=[]) self.rt.update_available_resource(self.ctx) def register_mocks(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): pci_mock.return_value = objects.InstancePCIRequests(requests=[]) self.inst_list_mock = inst_list_mock self.inst_by_uuid = inst_by_uuid self.migr_mock = migr_mock self.inst_save_mock = inst_save_mock def audit(self, rt, instances, migrations, migr_inst): self.inst_list_mock.return_value = \ objects.InstanceList(objects=instances) self.migr_mock.return_value = \ objects.MigrationList(objects=migrations) self.inst_by_uuid.return_value = migr_inst rt.update_available_resource(self.ctx) def assertEqual(self, expected, actual): if type(expected) != dict or type(actual) != dict: super(TestMoveClaim, self).assertEqual(expected, actual) return fail = False for k, e in expected.items(): a = actual[k] if e != a: print("%s: %s != %s" % (k, e, a)) fail = True if fail: self.fail() def adjust_expected(self, expected, flavor): disk_used = flavor['root_gb'] + flavor['ephemeral_gb'] expected.free_disk_gb -= disk_used expected.local_gb_used += disk_used expected.free_ram_mb -= flavor['memory_mb'] expected.memory_mb_used += flavor['memory_mb'] expected.vcpus_used += flavor['vcpus'] @mock.patch('nova.objects.Flavor.get_by_id') def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """Resize self.instance and check that the expected quantities of each resource have been consumed. """ self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) self.driver_mock.get_host_ip_addr.return_value = "fake-ip" flavor_mock.return_value = objects.Flavor(**self.flavor) mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid] self.instance.migration_context = mig_context_obj expected = copy.deepcopy(self.rt.compute_node) self.adjust_expected(expected, self.flavor) create_mig_mock = mock.patch.object(self.rt, '_create_migration') mig_ctxt_mock = mock.patch('nova.objects.MigrationContext', return_value=mig_context_obj) with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock: migr_mock.return_value = _MIGRATION_FIXTURES['source-only'] claim = self.rt.resize_claim( self.ctx, self.instance, self.flavor, None) self.assertEqual(1, ctxt_mock.call_count) self.assertIsInstance(claim, claims.MoveClaim) inst_save_mock.assert_called_once_with() self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) def test_claim_abort(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): # Resize self.instance and check that the expected quantities of each # resource have been consumed. The abort the resize claim and check # that the resources have been set back to their original values. 
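        # This test uses the dest-only migration fixture, which resizes to
        # fake1.medium, so a successful resize claim should leave
        # local_gb_used == 5 and memory_mb_used == 256; abort() must drop both
        # back to 0 and forget the tracked migration.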
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) self.driver_mock.get_host_ip_addr.return_value = "fake-host" migr_obj = _MIGRATION_FIXTURES['dest-only'] self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid] self.instance.migration_context = mig_context_obj self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2] with mock.patch.object(self.rt, '_create_migration') as migr_mock: migr_mock.return_value = migr_obj claim = self.rt.resize_claim( self.ctx, self.instance, self.flavor, None) self.assertIsInstance(claim, claims.MoveClaim) self.assertEqual(5, self.rt.compute_node.local_gb_used) self.assertEqual(256, self.rt.compute_node.memory_mb_used) self.assertEqual(1, len(self.rt.tracked_migrations)) with mock.patch('nova.objects.Instance.' 'drop_migration_context') as drop_migr_mock: claim.abort() drop_migr_mock.assert_called_once_with() self.assertEqual(0, self.rt.compute_node.local_gb_used) self.assertEqual(0, self.rt.compute_node.memory_mb_used) self.assertEqual(0, len(self.rt.tracked_migrations)) def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """Resize self.instance to the same host but with a different flavor. Then abort the claim. Check that the same amount of resources are available afterwards as we started with. """ self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] self.instance._context = self.ctx mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid] self.instance.migration_context = mig_context_obj with mock.patch.object(self.instance, 'save'): self.rt.instance_claim(self.ctx, self.instance, None) expected = copy.deepcopy(self.rt.compute_node) create_mig_mock = mock.patch.object(self.rt, '_create_migration') mig_ctxt_mock = mock.patch('nova.objects.MigrationContext', return_value=mig_context_obj) with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock: migr_mock.return_value = migr_obj claim = self.rt.resize_claim(self.ctx, self.instance, _INSTANCE_TYPE_OBJ_FIXTURES[1], None) self.assertEqual(1, ctxt_mock.call_count) self.audit(self.rt, [self.instance], [migr_obj], self.instance) inst_save_mock.assert_called_once_with() self.assertNotEqual(expected, self.rt.compute_node) claim.instance.migration_context = mig_context_obj with mock.patch('nova.objects.MigrationContext._destroy') as destroy_m: claim.abort() self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) destroy_m.assert_called_once_with(self.ctx, claim.instance.uuid) def test_revert_reserve_source( self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """Check that the source node of an instance migration reserves resources until the migration has completed, even if the migration is reverted. 
""" self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) # Get our migrations, instances and itypes in a row src_migr = _MIGRATION_FIXTURES['source-only'] src_instance = ( _MIGRATION_INSTANCE_FIXTURES[src_migr['instance_uuid']].obj_clone() ) src_instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[src_instance.uuid]) old_itype = _INSTANCE_TYPE_FIXTURES[src_migr['old_instance_type_id']] dst_migr = _MIGRATION_FIXTURES['dest-only'] dst_instance = ( _MIGRATION_INSTANCE_FIXTURES[dst_migr['instance_uuid']].obj_clone() ) new_itype = _INSTANCE_TYPE_FIXTURES[dst_migr['new_instance_type_id']] dst_instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[dst_instance.uuid]) # Set up the destination resource tracker # update_available_resource to initialise extensible resource trackers src_rt = self.rt (dst_rt, _, _) = setup_rt("other-host", "other-node") dst_rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0]) inst_list_mock.return_value = objects.InstanceList(objects=[]) dst_rt.update_available_resource(self.ctx) # Register the instance with dst_rt expected = copy.deepcopy(dst_rt.compute_node) with mock.patch.object(dst_instance, 'save'): dst_rt.instance_claim(self.ctx, dst_instance) self.adjust_expected(expected, new_itype) expected.stats = {'num_task_resize_migrating': 1, 'io_workload': 1, 'num_instances': 1, 'num_proj_fake-project': 1, 'num_vm_active': 1, 'num_os_type_fake-os': 1} expected.current_workload = 1 expected.running_vms = 1 self.assertTrue(obj_base.obj_equal_prims(expected, dst_rt.compute_node)) # Provide the migration via a mock, then audit dst_rt to check that # the instance + migration resources are not double-counted self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance) self.assertTrue(obj_base.obj_equal_prims(expected, dst_rt.compute_node)) # Audit src_rt with src_migr expected = copy.deepcopy(src_rt.compute_node) self.adjust_expected(expected, old_itype) self.audit(src_rt, [], [src_migr], src_instance) self.assertTrue(obj_base.obj_equal_prims(expected, src_rt.compute_node)) # Flag the instance as reverting and re-audit src_instance['vm_state'] = vm_states.RESIZED src_instance['task_state'] = task_states.RESIZE_REVERTING self.audit(src_rt, [], [src_migr], src_instance) self.assertTrue(obj_base.obj_equal_prims(expected, src_rt.compute_node)) def test_update_available_resources_migration_no_context(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): """When migrating onto older nodes - it is possible for the migration_context record to be missing. Confirm resource audit works regardless. 
""" self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] self.instance.migration_context = None expected = copy.deepcopy(self.rt.compute_node) self.adjust_expected(expected, self.flavor) self.audit(self.rt, [], [migr_obj], self.instance) self.assertTrue(obj_base.obj_equal_prims(expected, self.rt.compute_node)) def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock): self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock) migr_obj = _MIGRATION_FIXTURES['source-and-dest'] # This is good enough to prevent a lazy-load; value is unimportant migr_obj['updated_at'] = None self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']] self.instance.migration_context = ( _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]) self.audit(self.rt, [], [migr_obj, migr_obj], self.instance) self.assertEqual(1, len(self.rt.tracked_migrations)) class TestInstanceInResizeState(test.NoDBTestCase): def test_active_suspending(self): instance = objects.Instance(vm_state=vm_states.ACTIVE, task_state=task_states.SUSPENDING) self.assertFalse(resource_tracker._instance_in_resize_state(instance)) def test_resized_suspending(self): instance = objects.Instance(vm_state=vm_states.RESIZED, task_state=task_states.SUSPENDING) self.assertTrue(resource_tracker._instance_in_resize_state(instance)) def test_resized_resize_migrating(self): instance = objects.Instance(vm_state=vm_states.RESIZED, task_state=task_states.RESIZE_MIGRATING) self.assertTrue(resource_tracker._instance_in_resize_state(instance)) def test_resized_resize_finish(self): instance = objects.Instance(vm_state=vm_states.RESIZED, task_state=task_states.RESIZE_FINISH) self.assertTrue(resource_tracker._instance_in_resize_state(instance)) nova-13.1.4/nova/tests/unit/compute/__init__.py0000664000567000056710000000000013064447140022546 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/compute/monitors/0000775000567000056710000000000013064447471022330 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/compute/monitors/__init__.py0000664000567000056710000000000013064447140024420 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/compute/monitors/cpu/0000775000567000056710000000000013064447471023117 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/compute/monitors/cpu/__init__.py0000664000567000056710000000000013064447140025207 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py0000664000567000056710000000660313064447152027070 0ustar jenkinsjenkins00000000000000# Copyright 2013 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Compute Driver CPU resource monitor.""" import mock from nova.compute.monitors.cpu import virt_driver from nova import objects from nova import test class FakeDriver(object): def get_host_cpu_stats(self): return {'kernel': 5664160000000, 'idle': 1592705190000000, 'frequency': 800, 'user': 26728850000000, 'iowait': 6121490000000} class FakeResourceTracker(object): driver = FakeDriver() class VirtDriverCPUMonitorTestCase(test.NoDBTestCase): def test_get_metric_names(self): monitor = virt_driver.Monitor(FakeResourceTracker()) names = monitor.get_metric_names() self.assertEqual(10, len(names)) self.assertIn("cpu.frequency", names) self.assertIn("cpu.user.time", names) self.assertIn("cpu.kernel.time", names) self.assertIn("cpu.idle.time", names) self.assertIn("cpu.iowait.time", names) self.assertIn("cpu.user.percent", names) self.assertIn("cpu.kernel.percent", names) self.assertIn("cpu.idle.percent", names) self.assertIn("cpu.iowait.percent", names) self.assertIn("cpu.percent", names) def test_get_metrics(self): metrics = objects.MonitorMetricList() monitor = virt_driver.Monitor(FakeResourceTracker()) monitor.add_metrics_to_list(metrics) names = monitor.get_metric_names() for metric in metrics.objects: self.assertIn(metric.name, names) # Some conversion to a dict to ease testing... metrics = {m.name: m.value for m in metrics.objects} self.assertEqual(metrics["cpu.frequency"], 800) self.assertEqual(metrics["cpu.user.time"], 26728850000000) self.assertEqual(metrics["cpu.kernel.time"], 5664160000000) self.assertEqual(metrics["cpu.idle.time"], 1592705190000000) self.assertEqual(metrics["cpu.iowait.time"], 6121490000000) self.assertEqual(metrics["cpu.user.percent"], 1) self.assertEqual(metrics["cpu.kernel.percent"], 0) self.assertEqual(metrics["cpu.idle.percent"], 97) self.assertEqual(metrics["cpu.iowait.percent"], 0) self.assertEqual(metrics["cpu.percent"], 2) def test_ensure_single_sampling(self): # We want to ensure that the virt driver's get_host_cpu_stats() # is only ever called once, otherwise values for monitor metrics # might be illogical -- e.g. pct cpu times for user/system/idle # may add up to more than 100. metrics = objects.MonitorMetricList() monitor = virt_driver.Monitor(FakeResourceTracker()) with mock.patch.object(FakeDriver, 'get_host_cpu_stats') as mocked: monitor.add_metrics_to_list(metrics) mocked.assert_called_once_with() nova-13.1.4/nova/tests/unit/compute/monitors/test_monitors.py0000664000567000056710000000426313064447140025611 0ustar jenkinsjenkins00000000000000# Copyright 2013 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for resource monitors.""" import mock from nova.compute import monitors from nova import test class MonitorsTestCase(test.NoDBTestCase): """Test case for monitors.""" @mock.patch('stevedore.enabled.EnabledExtensionManager') def test_check_enabled_monitor(self, _mock_ext_manager): class FakeExt(object): def __init__(self, ept, name): self.entry_point_target = ept self.name = name # We check to ensure only one CPU monitor is loaded... self.flags(compute_monitors=['mon1', 'mon2']) handler = monitors.MonitorHandler(None) ext_cpu_mon1 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon1') ext_cpu_mon2 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon2') self.assertTrue(handler.check_enabled_monitor(ext_cpu_mon1)) self.assertFalse(handler.check_enabled_monitor(ext_cpu_mon2)) # We check to ensure that the auto-prefixing of the CPU # namespace is handled properly... self.flags(compute_monitors=['cpu.mon1', 'mon2']) handler = monitors.MonitorHandler(None) ext_cpu_mon1 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon1') ext_cpu_mon2 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor', 'mon2') self.assertTrue(handler.check_enabled_monitor(ext_cpu_mon1)) self.assertFalse(handler.check_enabled_monitor(ext_cpu_mon2)) nova-13.1.4/nova/tests/unit/compute/test_multiple_nodes.py0000664000567000056710000001600513064447152025110 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for compute service with multiple compute nodes.""" from oslo_utils import importutils import nova.conf from nova import context from nova import objects from nova import test from nova.tests import uuidsentinel from nova.virt import fake CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') class BaseTestCase(test.TestCase): def tearDown(self): fake.restore_nodes() super(BaseTestCase, self).tearDown() class FakeDriverSingleNodeTestCase(BaseTestCase): def setUp(self): super(FakeDriverSingleNodeTestCase, self).setUp() self.driver = fake.FakeDriver(virtapi=None) fake.set_nodes(['xyz']) def test_get_available_resource(self): res = self.driver.get_available_resource('xyz') self.assertEqual(res['hypervisor_hostname'], 'xyz') class FakeDriverMultiNodeTestCase(BaseTestCase): def setUp(self): super(FakeDriverMultiNodeTestCase, self).setUp() self.driver = fake.FakeDriver(virtapi=None) fake.set_nodes(['aaa', 'bbb']) def test_get_available_resource(self): res_a = self.driver.get_available_resource('aaa') self.assertEqual(res_a['hypervisor_hostname'], 'aaa') res_b = self.driver.get_available_resource('bbb') self.assertEqual(res_b['hypervisor_hostname'], 'bbb') res_x = self.driver.get_available_resource('xxx') self.assertEqual(res_x, {}) class MultiNodeComputeTestCase(BaseTestCase): def setUp(self): super(MultiNodeComputeTestCase, self).setUp() self.flags(compute_driver='nova.virt.fake.FakeDriver') self.compute = importutils.import_object(CONF.compute_manager) self.flags(use_local=True, group='conductor') self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) def fake_get_compute_nodes_in_db(context, use_slave=False): fake_compute_nodes = [{'local_gb': 259, 'uuid': uuidsentinel.fake_compute, 'vcpus_used': 0, 'deleted': 0, 'hypervisor_type': 'powervm', 'created_at': '2013-04-01T00:27:06.000000', 'local_gb_used': 0, 'updated_at': '2013-04-03T00:35:41.000000', 'hypervisor_hostname': 'fake_phyp1', 'memory_mb_used': 512, 'memory_mb': 131072, 'current_workload': 0, 'vcpus': 16, 'cpu_info': 'ppc64,powervm,3940', 'running_vms': 0, 'free_disk_gb': 259, 'service_id': 7, 'hypervisor_version': 7, 'disk_available_least': 265856, 'deleted_at': None, 'free_ram_mb': 130560, 'metrics': '', 'numa_topology': '', 'stats': '', 'id': 2, 'host': 'fake_phyp1', 'cpu_allocation_ratio': None, 'ram_allocation_ratio': None, 'disk_allocation_ratio': None, 'host_ip': '127.0.0.1'}] return [objects.ComputeNode._from_db_object( context, objects.ComputeNode(), cn) for cn in fake_compute_nodes] def fake_compute_node_delete(context, compute_node_id): self.assertEqual(2, compute_node_id) self.stubs.Set(self.compute, '_get_compute_nodes_in_db', fake_get_compute_nodes_in_db) self.stub_out('nova.db.compute_node_delete', fake_compute_node_delete) def test_update_available_resource_add_remove_node(self): ctx = context.get_admin_context() fake.set_nodes(['A', 'B', 'C']) self.compute.update_available_resource(ctx) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B', 'C']) fake.set_nodes(['A', 'B']) self.compute.update_available_resource(ctx) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B']) fake.set_nodes(['A', 'B', 'C']) self.compute.update_available_resource(ctx) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B', 'C']) def test_compute_manager_removes_deleted_node(self): ctx = context.get_admin_context() fake.set_nodes(['A', 'B']) fake_compute_nodes = [ objects.ComputeNode( context=ctx, 
hypervisor_hostname='A', id=2), objects.ComputeNode( context=ctx, hypervisor_hostname='B', id=3), ] def fake_get_compute_nodes_in_db(context, use_slave=False): return fake_compute_nodes def fake_compute_node_delete(context, compute_node_id): for cn in fake_compute_nodes: if compute_node_id == cn.id: fake_compute_nodes.remove(cn) return self.stubs.Set(self.compute, '_get_compute_nodes_in_db', fake_get_compute_nodes_in_db) self.stub_out('nova.db.compute_node_delete', fake_compute_node_delete) self.compute.update_available_resource(ctx) # Verify nothing is deleted if driver and db compute nodes match self.assertEqual(len(fake_compute_nodes), 2) self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A', 'B']) fake.set_nodes(['A']) self.compute.update_available_resource(ctx) # Verify B gets deleted since now only A is reported by driver self.assertEqual(len(fake_compute_nodes), 1) self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A') self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ['A']) nova-13.1.4/nova/tests/unit/compute/test_compute_utils.py0000664000567000056710000007521413064447152024770 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
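# MultiNodeComputeTestCase above pins down how update_available_resource()
# keeps per-node state in step with the virt driver: a resource tracker is
# kept for exactly the nodes the driver currently reports, and compute-node
# database records for nodes that have disappeared are deleted. A simplified,
# self-contained sketch of that reconciliation (plain dicts instead of
# ResourceTracker / ComputeNode objects, not the manager's actual code):
def sync_node_trackers(trackers, driver_nodes, db_nodes, delete_db_node):
    """Reconcile trackers and DB records with the driver's node list."""
    # Make sure every reported node has a tracker ...
    for node in driver_nodes:
        trackers.setdefault(node, 'tracker-for-%s' % node)
    # ... and drop trackers for nodes the driver no longer reports.
    for node in list(trackers):
        if node not in driver_nodes:
            del trackers[node]
    # Orphaned compute-node records are removed from the database as well.
    for node, node_id in list(db_nodes.items()):
        if node not in driver_nodes:
            delete_db_node(node_id)
            del db_nodes[node]


trackers, db_nodes, deleted = {}, {'A': 2, 'B': 3}, []
sync_node_trackers(trackers, ['A', 'B'], db_nodes, deleted.append)
assert sorted(trackers) == ['A', 'B'] and deleted == []
sync_node_trackers(trackers, ['A'], db_nodes, deleted.append)
assert sorted(trackers) == ['A'] and deleted == [3] and list(db_nodes) == ['A']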
"""Tests For miscellaneous util methods used with compute.""" import copy import string import uuid import mock from oslo_serialization import jsonutils from oslo_utils import importutils import six from nova.compute import flavors from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils import nova.conf from nova import context from nova import exception from nova.image import glance from nova.network import api as network_api from nova.network import model from nova import objects from nova.objects import block_device as block_device_obj from nova import rpc from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network from nova.tests.unit import fake_notifier from nova.tests.unit import fake_server_actions import nova.tests.unit.image.fake from nova.tests.unit.objects import test_flavor from nova.tests import uuidsentinel as uuids CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') def create_instance(context, user_id='fake', project_id='fake', params=None): """Create a test instance.""" flavor = flavors.get_flavor_by_name('m1.tiny') net_info = model.NetworkInfo([]) info_cache = objects.InstanceInfoCache(network_info=net_info) inst = objects.Instance(context=context, image_ref=1, reservation_id='r-fakeres', user_id=user_id, project_id=project_id, instance_type_id=flavor.id, flavor=flavor, old_flavor=None, new_flavor=None, system_metadata={}, ami_launch_index=0, root_gb=0, ephemeral_gb=0, info_cache=info_cache) if params: inst.update(params) inst.create() return inst class ComputeValidateDeviceTestCase(test.NoDBTestCase): def setUp(self): super(ComputeValidateDeviceTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') # check if test name includes "xen" if 'xen' in self.id(): self.flags(compute_driver='xenapi.XenAPIDriver') self.instance = objects.Instance(uuid=uuid.uuid4().hex, root_device_name=None, default_ephemeral_device=None) else: self.instance = objects.Instance(uuid=uuid.uuid4().hex, root_device_name='/dev/vda', default_ephemeral_device='/dev/vdb') flavor = objects.Flavor(**test_flavor.fake_flavor) self.instance.system_metadata = {} self.instance.flavor = flavor self.instance.default_swap_device = None self.data = [] self.stub_out('nova.db.block_device_mapping_get_all_by_instance', lambda context, instance: self.data) def _validate_device(self, device=None): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, self.instance['uuid']) return compute_utils.get_device_name_for_instance( self.instance, bdms, device) @staticmethod def _fake_bdm(device): return fake_block_device.FakeDbBlockDeviceDict({ 'source_type': 'volume', 'destination_type': 'volume', 'device_name': device, 'no_device': None, 'volume_id': 'fake', 'snapshot_id': None, 'guest_format': None }) def test_wrap(self): self.data = [] for letter in string.ascii_lowercase[2:]: self.data.append(self._fake_bdm('/dev/vd' + letter)) device = self._validate_device() self.assertEqual(device, '/dev/vdaa') def test_wrap_plus_one(self): self.data = [] for letter in string.ascii_lowercase[2:]: self.data.append(self._fake_bdm('/dev/vd' + letter)) self.data.append(self._fake_bdm('/dev/vdaa')) device = self._validate_device() self.assertEqual(device, '/dev/vdab') def test_later(self): self.data = [ self._fake_bdm('/dev/vdc'), self._fake_bdm('/dev/vdd'), self._fake_bdm('/dev/vde'), ] device = self._validate_device() 
self.assertEqual(device, '/dev/vdf') def test_gap(self): self.data = [ self._fake_bdm('/dev/vdc'), self._fake_bdm('/dev/vde'), ] device = self._validate_device() self.assertEqual(device, '/dev/vdd') def test_no_bdms(self): self.data = [] device = self._validate_device() self.assertEqual(device, '/dev/vdc') def test_lxc_names_work(self): self.instance['root_device_name'] = '/dev/a' self.instance['ephemeral_device_name'] = '/dev/b' self.data = [] device = self._validate_device() self.assertEqual(device, '/dev/c') def test_name_conversion(self): self.data = [] device = self._validate_device('/dev/c') self.assertEqual(device, '/dev/vdc') device = self._validate_device('/dev/sdc') self.assertEqual(device, '/dev/vdc') device = self._validate_device('/dev/xvdc') self.assertEqual(device, '/dev/vdc') def test_invalid_device_prefix(self): self.assertRaises(exception.InvalidDevicePath, self._validate_device, '/baddata/vdc') def test_device_in_use(self): exc = self.assertRaises(exception.DevicePathInUse, self._validate_device, '/dev/vda') self.assertIn('/dev/vda', six.text_type(exc)) def test_swap(self): self.instance['default_swap_device'] = "/dev/vdc" device = self._validate_device() self.assertEqual(device, '/dev/vdd') def test_swap_no_ephemeral(self): self.instance.default_ephemeral_device = None self.instance.default_swap_device = "/dev/vdb" device = self._validate_device() self.assertEqual(device, '/dev/vdc') def test_ephemeral_xenapi(self): self.instance.flavor.ephemeral_gb = 10 self.instance.flavor.swap = 0 device = self._validate_device() self.assertEqual(device, '/dev/xvdc') def test_swap_xenapi(self): self.instance.flavor.ephemeral_gb = 0 self.instance.flavor.swap = 10 device = self._validate_device() self.assertEqual(device, '/dev/xvdb') def test_swap_and_ephemeral_xenapi(self): self.instance.flavor.ephemeral_gb = 10 self.instance.flavor.swap = 10 device = self._validate_device() self.assertEqual(device, '/dev/xvdd') def test_swap_and_one_attachment_xenapi(self): self.instance.flavor.ephemeral_gb = 0 self.instance.flavor.swap = 10 device = self._validate_device() self.assertEqual(device, '/dev/xvdb') self.data.append(self._fake_bdm(device)) device = self._validate_device() self.assertEqual(device, '/dev/xvdd') def test_no_dev_root_device_name_get_next_name(self): self.instance['root_device_name'] = 'vda' device = self._validate_device() self.assertEqual('/dev/vdc', device) class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase): def setUp(self): super(DefaultDeviceNamesForInstanceTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.ephemerals = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': None, 'boot_index': -1})]) self.swap = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdc', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'boot_index': -1})]) self.block_device_mapping = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 
'boot_index': 0}), fake_block_device.FakeDbBlockDeviceDict( {'id': 4, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdd', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': 'fake-snapshot-id-1', 'boot_index': -1}), fake_block_device.FakeDbBlockDeviceDict( {'id': 5, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vde', 'source_type': 'blank', 'destination_type': 'volume', 'boot_index': -1})]) self.instance = {'uuid': uuids.instance, 'ephemeral_gb': 2} self.is_libvirt = False self.root_device_name = '/dev/vda' self.update_called = False self.patchers = [] self.patchers.append( mock.patch.object(objects.BlockDeviceMapping, 'save')) for patcher in self.patchers: patcher.start() def tearDown(self): super(DefaultDeviceNamesForInstanceTestCase, self).tearDown() for patcher in self.patchers: patcher.stop() def _test_default_device_names(self, *block_device_lists): compute_utils.default_device_names_for_instance(self.instance, self.root_device_name, *block_device_lists) def test_only_block_device_mapping(self): # Test no-op original_bdm = copy.deepcopy(self.block_device_mapping) self._test_default_device_names([], [], self.block_device_mapping) for original, new in zip(original_bdm, self.block_device_mapping): self.assertEqual(original.device_name, new.device_name) # Assert it defaults the missing one as expected self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], [], self.block_device_mapping) self.assertEqual('/dev/vdb', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vdc', self.block_device_mapping[2]['device_name']) def test_with_ephemerals(self): # Test ephemeral gets assigned self.ephemerals[0]['device_name'] = None self._test_default_device_names(self.ephemerals, [], self.block_device_mapping) self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, [], self.block_device_mapping) self.assertEqual('/dev/vdc', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[2]['device_name']) def test_with_swap(self): # Test swap only self.swap[0]['device_name'] = None self._test_default_device_names([], self.swap, []) self.assertEqual(self.swap[0]['device_name'], '/dev/vdb') # Test swap and block_device_mapping self.swap[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], self.swap, self.block_device_mapping) self.assertEqual(self.swap[0]['device_name'], '/dev/vdb') self.assertEqual('/dev/vdc', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[2]['device_name']) def test_all_together(self): # Test swap missing self.swap[0]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual(self.swap[0]['device_name'], '/dev/vdc') # Test swap and eph missing self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.assertEqual(self.swap[0]['device_name'], '/dev/vdc') # Test all missing self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = 
None self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.assertEqual(self.swap[0]['device_name'], '/dev/vdc') self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vde', self.block_device_mapping[2]['device_name']) class UsageInfoTestCase(test.TestCase): def setUp(self): def fake_get_nw_info(cls, ctxt, instance): self.assertTrue(ctxt.is_admin) return fake_network.fake_get_instance_nw_info(self, 1, 1) super(UsageInfoTestCase, self).setUp() self.stubs.Set(network_api.API, 'get_instance_nw_info', fake_get_nw_info) fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) self.flags(use_local=True, group='conductor') self.flags(compute_driver='nova.virt.fake.FakeDriver', network_manager='nova.network.manager.FlatManager') self.compute = importutils.import_object(CONF.compute_manager) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def fake_show(meh, context, id, **kwargs): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} self.stubs.Set(nova.tests.unit.image.fake._FakeImageService, 'show', fake_show) fake_network.set_stub_network_methods(self) fake_server_actions.stub_out_action_events(self.stubs) def test_notify_usage_exists(self): # Ensure 'exists' notification generates appropriate usage data. instance = create_instance(self.context) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_notify_usage_exists_deleted_instance(self): # Ensure 'exists' notification generates appropriate usage data. 
instance = create_instance(self.context) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() self.compute.terminate_instance(self.context, instance, [], []) compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) def test_notify_usage_exists_instance_not_found(self): # Ensure 'exists' notification generates appropriate usage data. instance = create_instance(self.context) self.compute.terminate_instance(self.context, instance, [], []) compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) def test_notify_about_instance_usage(self): instance = create_instance(self.context) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() extra_usage_info = {'image_name': 'fake_name'} compute_utils.notify_about_instance_usage( rpc.get_notifier('compute'), self.context, instance, 'create.start', extra_usage_info=extra_usage_info) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.create.start') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) 
self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'image_meta'): self.assertIn(attr, payload, "Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) self.assertEqual(payload['image_name'], 'fake_name') image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_notify_about_aggregate_update_with_id(self): # Set aggregate payload aggregate_payload = {'aggregate_id': 1} compute_utils.notify_about_aggregate_update(self.context, "create.end", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'aggregate.create.end') payload = msg.payload self.assertEqual(payload['aggregate_id'], 1) def test_notify_about_aggregate_update_with_name(self): # Set aggregate payload aggregate_payload = {'name': 'fakegroup'} compute_utils.notify_about_aggregate_update(self.context, "create.start", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'aggregate.create.start') payload = msg.payload self.assertEqual(payload['name'], 'fakegroup') def test_notify_about_aggregate_update_without_name_id(self): # Set empty aggregate payload aggregate_payload = {} compute_utils.notify_about_aggregate_update(self.context, "create.start", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0) class ComputeUtilsGetValFromSysMetadata(test.NoDBTestCase): def test_get_value_from_system_metadata(self): instance = fake_instance.fake_instance_obj('fake-context') system_meta = {'int_val': 1, 'int_string': '2', 'not_int': 'Nope'} instance.system_metadata = system_meta result = compute_utils.get_value_from_system_metadata( instance, 'int_val', int, 0) self.assertEqual(1, result) result = compute_utils.get_value_from_system_metadata( instance, 'int_string', int, 0) self.assertEqual(2, result) result = compute_utils.get_value_from_system_metadata( instance, 'not_int', int, 0) self.assertEqual(0, result) class ComputeUtilsGetNWInfo(test.NoDBTestCase): def test_instance_object_none_info_cache(self): inst = fake_instance.fake_instance_obj('fake-context', expected_attrs=['info_cache']) self.assertIsNone(inst.info_cache) result = compute_utils.get_nw_info_for_instance(inst) self.assertEqual(jsonutils.dumps([]), result.json()) class ComputeUtilsGetRebootTypes(test.NoDBTestCase): def setUp(self): super(ComputeUtilsGetRebootTypes, self).setUp() self.context = context.RequestContext('fake', 'fake') def test_get_reboot_type_started_soft(self): reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_STARTED, power_state.RUNNING) self.assertEqual(reboot_type, 'SOFT') def test_get_reboot_type_pending_soft(self): reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_PENDING, power_state.RUNNING) self.assertEqual(reboot_type, 'SOFT') def test_get_reboot_type_hard(self): reboot_type = 
compute_utils.get_reboot_type('foo', power_state.RUNNING) self.assertEqual(reboot_type, 'HARD') def test_get_reboot_not_running_hard(self): reboot_type = compute_utils.get_reboot_type('foo', 'bar') self.assertEqual(reboot_type, 'HARD') class ComputeUtilsTestCase(test.NoDBTestCase): @mock.patch('netifaces.interfaces') def test_get_machine_ips_value_error(self, mock_interfaces): # Tests that the utility method does not explode if netifaces raises # a ValueError. iface = mock.sentinel mock_interfaces.return_value = [iface] with mock.patch('netifaces.ifaddresses', side_effect=ValueError) as mock_ifaddresses: addresses = compute_utils.get_machine_ips() self.assertEqual([], addresses) mock_ifaddresses.assert_called_once_with(iface) class ComputeUtilsQuotaDeltaTestCase(test.TestCase): def setUp(self): super(ComputeUtilsQuotaDeltaTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') def test_upsize_quota_delta(self): old_flavor = flavors.get_flavor_by_name('m1.tiny') new_flavor = flavors.get_flavor_by_name('m1.medium') expected_deltas = { 'cores': new_flavor['vcpus'] - old_flavor['vcpus'], 'ram': new_flavor['memory_mb'] - old_flavor['memory_mb'] } deltas = compute_utils.upsize_quota_delta(self.context, new_flavor, old_flavor) self.assertEqual(expected_deltas, deltas) def test_downsize_quota_delta(self): inst = create_instance(self.context, params=None) inst.old_flavor = flavors.get_flavor_by_name('m1.medium') inst.new_flavor = flavors.get_flavor_by_name('m1.tiny') expected_deltas = { 'cores': (inst.new_flavor['vcpus'] - inst.old_flavor['vcpus']), 'ram': (inst.new_flavor['memory_mb'] - inst.old_flavor['memory_mb']) } deltas = compute_utils.downsize_quota_delta(self.context, inst) self.assertEqual(expected_deltas, deltas) def test_reverse_quota_delta(self): inst = create_instance(self.context, params=None) inst.old_flavor = flavors.get_flavor_by_name('m1.tiny') inst.new_flavor = flavors.get_flavor_by_name('m1.medium') expected_deltas = { 'cores': -1 * (inst.new_flavor['vcpus'] - inst.old_flavor['vcpus']), 'ram': -1 * (inst.new_flavor['memory_mb'] - inst.old_flavor['memory_mb']) } deltas = compute_utils.reverse_upsize_quota_delta(self.context, inst) self.assertEqual(expected_deltas, deltas) @mock.patch.object(objects.Quotas, 'reserve') @mock.patch.object(objects.quotas, 'ids_from_instance') def test_reserve_quota_delta(self, mock_ids_from_instance, mock_reserve): quotas = objects.Quotas(context=context) inst = create_instance(self.context, params=None) inst.old_flavor = flavors.get_flavor_by_name('m1.tiny') inst.new_flavor = flavors.get_flavor_by_name('m1.medium') mock_ids_from_instance.return_value = (inst.project_id, inst.user_id) mock_reserve.return_value = quotas deltas = compute_utils.upsize_quota_delta(self.context, inst.new_flavor, inst.old_flavor) compute_utils.reserve_quota_delta(self.context, deltas, inst) mock_reserve.assert_called_once_with(project_id=inst.project_id, user_id=inst.user_id, **deltas) nova-13.1.4/nova/tests/unit/compute/test_flavors.py0000664000567000056710000000443313064447140023540 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for flavor basic functions""" from nova.compute import flavors from nova import exception from nova import test class ExtraSpecTestCase(test.NoDBTestCase): def _flavor_validate_extra_spec_keys_invalid_input(self, key_name_list): self.assertRaises(exception.InvalidInput, flavors.validate_extra_spec_keys, key_name_list) def test_flavor_validate_extra_spec_keys_invalid_input(self): lists = [['', ], ['*', ], ['+', ]] for x in lists: self._flavor_validate_extra_spec_keys_invalid_input(x) def test_flavor_validate_extra_spec_keys(self): key_name_list = ['abc', 'ab c', 'a-b-c', 'a_b-c', 'a:bc'] flavors.validate_extra_spec_keys(key_name_list) class CreateFlavorTestCase(test.NoDBTestCase): def test_create_flavor_ram_error(self): args = ("ram_test", "9999999999", "1", "10", "1") try: flavors.create(*args) self.fail("Be sure this will never be executed.") except exception.InvalidInput as e: self.assertIn("ram", e.message) def test_create_flavor_disk_error(self): args = ("disk_test", "1024", "1", "9999999999", "1") try: flavors.create(*args) self.fail("Be sure this will never be executed.") except exception.InvalidInput as e: self.assertIn("disk", e.message) def test_create_flavor_ephemeral_error(self): args = ("ephemeral_test", "1024", "1", "10", "9999999999") try: flavors.create(*args) self.fail("Be sure this will never be executed.") except exception.InvalidInput as e: self.assertIn("ephemeral", e.message) nova-13.1.4/nova/tests/unit/compute/test_hvtype.py0000664000567000056710000000334013064447152023402 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
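# ExtraSpecTestCase above fixes the shape of a valid extra-spec key: letters,
# digits, underscores, dots, hyphens, colons and spaces are accepted, while an
# empty key or characters such as '*' and '+' are rejected with InvalidInput.
# The sketch below is one pattern consistent with those cases (Nova's actual
# pattern and exception type are not reproduced here):
import re

_EXTRA_SPEC_KEY = re.compile(r'^[\w\. :-]+$')


def validate_extra_spec_keys(key_names):
    """Reject any key that does not match the allowed character set."""
    for key in key_names:
        if not _EXTRA_SPEC_KEY.match(key):
            raise ValueError('invalid extra spec key: %r' % key)


validate_extra_spec_keys(['abc', 'ab c', 'a-b-c', 'a_b-c', 'a:bc'])  # accepted
for bad in ['', '*', '+']:
    try:
        validate_extra_spec_keys([bad])
    except ValueError:
        pass
    else:
        raise AssertionError('%r should have been rejected' % bad)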
from nova.compute import hv_type from nova import exception from nova import test class HvTypeTest(test.NoDBTestCase): def test_valid_string(self): self.assertTrue(hv_type.is_valid("vmware")) def test_valid_constant(self): self.assertTrue(hv_type.is_valid(hv_type.QEMU)) def test_valid_docker(self): self.assertTrue(hv_type.is_valid("docker")) def test_valid_lxd(self): self.assertTrue(hv_type.is_valid("lxd")) def test_valid_vz(self): self.assertTrue(hv_type.is_valid(hv_type.VIRTUOZZO)) def test_valid_bogus(self): self.assertFalse(hv_type.is_valid("acmehypervisor")) def test_canonicalize_none(self): self.assertIsNone(hv_type.canonicalize(None)) def test_canonicalize_case(self): self.assertEqual(hv_type.QEMU, hv_type.canonicalize("QeMu")) def test_canonicalize_xapi(self): self.assertEqual(hv_type.XEN, hv_type.canonicalize("xapi")) def test_canonicalize_invalid(self): self.assertRaises(exception.InvalidHypervisorVirtType, hv_type.canonicalize, "wibble") nova-13.1.4/nova/tests/unit/compute/test_compute.py0000664000567000056710000205437713064447152023561 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
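# HvTypeTest above pins down the canonicalisation rules for hypervisor type
# strings: None passes through, names are lower-cased, the legacy 'xapi' name
# maps to 'xen', and anything unknown raises InvalidHypervisorVirtType. A toy,
# self-contained version restricted to a handful of names taken from the
# tests (not nova.compute.hv_type itself, which keeps the full list and its
# own exception type):
_HV_ALIASES = {'xapi': 'xen'}
_KNOWN_HV_TYPES = frozenset(['qemu', 'xen', 'vmware', 'docker', 'lxd', 'vz'])


def canonicalize_hv_type(name):
    """Lower-case a hypervisor type, resolve aliases, reject unknown names."""
    if name is None:
        return None
    name = _HV_ALIASES.get(name.lower(), name.lower())
    if name not in _KNOWN_HV_TYPES:
        raise ValueError('unknown hypervisor virt type: %s' % name)
    return name


assert canonicalize_hv_type(None) is None
assert canonicalize_hv_type('QeMu') == 'qemu'
assert canonicalize_hv_type('xapi') == 'xen'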
"""Tests for compute service.""" import base64 import datetime import operator import sys import time import traceback import uuid from eventlet import greenthread import mock from mox3 import mox from neutronclient.common import exceptions as neutron_exceptions from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import fixture as utils_fixture from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils import six import testtools from testtools import matchers as testtools_matchers import nova from nova import availability_zones from nova import block_device from nova import compute from nova.compute import api as compute_api from nova.compute import arch from nova.compute import flavors from nova.compute import manager as compute_manager from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import manager as conductor_manager import nova.conf from nova.console import type as ctype from nova import context from nova import db from nova import exception from nova.image import api as image_api from nova.image import glance from nova.network import api as network_api from nova.network import model as network_model from nova.network.security_group import openstack_driver from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import fields as obj_fields from nova.objects import instance as instance_obj from nova.objects import migrate_data as migrate_data_obj from nova import policy from nova import quota from nova.scheduler import client as scheduler_client from nova import test from nova.tests import fixtures from nova.tests.unit.compute import eventlet_utils from nova.tests.unit.compute import fake_resource_tracker from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network from nova.tests.unit import fake_network_cache_model from nova.tests.unit import fake_notifier from nova.tests.unit import fake_server_actions from nova.tests.unit.image import fake as fake_image from nova.tests.unit import matchers from nova.tests.unit.objects import test_flavor from nova.tests.unit.objects import test_instance_numa_topology from nova.tests.unit.objects import test_migration from nova.tests.unit import utils as test_utils from nova.tests import uuidsentinel as uuids from nova import utils from nova.virt import block_device as driver_block_device from nova.virt import event from nova.virt import fake from nova.virt import hardware from nova.volume import cinder QUOTAS = quota.QUOTAS LOG = logging.getLogger(__name__) CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('live_migration_retry_count', 'nova.compute.manager') FAKE_IMAGE_REF = uuids.image_ref NODENAME = 'fakenode1' def fake_not_implemented(*args, **kwargs): raise NotImplementedError() def get_primitive_instance_by_uuid(context, instance_uuid): """Helper method to get an instance and then convert it to a primitive form using jsonutils. 
""" instance = db.instance_get_by_uuid(context, instance_uuid) return jsonutils.to_primitive(instance) def unify_instance(instance): """Return a dict-like instance for both object-initiated and model-initiated sources that can reasonably be compared. """ newdict = dict() for k, v in six.iteritems(instance): if isinstance(v, datetime.datetime): # NOTE(danms): DB models and Instance objects have different # timezone expectations v = v.replace(tzinfo=None) elif k == 'fault': # NOTE(danms): DB models don't have 'fault' continue elif k == 'pci_devices': # NOTE(yonlig.he) pci devices need lazy loading # fake db does not support it yet. continue newdict[k] = v return newdict class FakeComputeTaskAPI(object): def resize_instance(self, context, instance, extra_instance_updates, scheduler_hint, flavor, reservations): pass class BaseTestCase(test.TestCase): def setUp(self): super(BaseTestCase, self).setUp() self.flags(network_manager='nova.network.manager.FlatManager') fake.set_nodes([NODENAME]) self.flags(use_local=True, group='conductor') fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) self.compute = importutils.import_object(CONF.compute_manager) # execute power syncing synchronously for testing: self.compute._sync_power_pool = eventlet_utils.SyncPool() # override tracker with a version that doesn't need the database: fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host, self.compute.driver, NODENAME) self.compute._resource_tracker_dict[NODENAME] = fake_rt def fake_get_compute_nodes_in_db(context, use_slave=False): fake_compute_nodes = [{'local_gb': 259, 'uuid': uuids.fake_compute_node, 'vcpus_used': 0, 'deleted': 0, 'hypervisor_type': 'powervm', 'created_at': '2013-04-01T00:27:06.000000', 'local_gb_used': 0, 'updated_at': '2013-04-03T00:35:41.000000', 'hypervisor_hostname': 'fake_phyp1', 'memory_mb_used': 512, 'memory_mb': 131072, 'current_workload': 0, 'vcpus': 16, 'cpu_info': 'ppc64,powervm,3940', 'running_vms': 0, 'free_disk_gb': 259, 'service_id': 7, 'hypervisor_version': 7, 'disk_available_least': 265856, 'deleted_at': None, 'free_ram_mb': 130560, 'metrics': '', 'stats': '', 'numa_topology': '', 'id': 2, 'host': 'fake_phyp1', 'cpu_allocation_ratio': 16.0, 'ram_allocation_ratio': 1.5, 'disk_allocation_ratio': 1.0, 'host_ip': '127.0.0.1'}] return [objects.ComputeNode._from_db_object( context, objects.ComputeNode(), cn) for cn in fake_compute_nodes] def fake_compute_node_delete(context, compute_node_id): self.assertEqual(2, compute_node_id) self.stubs.Set(self.compute, '_get_compute_nodes_in_db', fake_get_compute_nodes_in_db) self.stub_out('nova.db.compute_node_delete', fake_compute_node_delete) self.compute.update_available_resource( context.get_admin_context()) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) self.none_quotas = objects.Quotas.from_reservations( self.context, None) def fake_show(meh, context, id, **kwargs): if id: return {'id': id, 'name': 'fake_name', 'status': 'active', 'properties': {'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'something_else': 'meow'}} else: raise exception.ImageNotFound(image_id=id) fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) fake_taskapi = FakeComputeTaskAPI() self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi) fake_network.set_stub_network_methods(self) fake_server_actions.stub_out_action_events(self.stubs) def fake_get_nw_info(cls, ctxt, instance, 
*args, **kwargs): return network_model.NetworkInfo() self.stubs.Set(network_api.API, 'get_instance_nw_info', fake_get_nw_info) def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs): self.assertFalse(ctxt.is_admin) return fake_network.fake_get_instance_nw_info(self, 1, 1) self.stubs.Set(network_api.API, 'allocate_for_instance', fake_allocate_for_instance) self.compute_api = compute.API() # Just to make long lines short self.rt = self.compute._get_resource_tracker(NODENAME) def tearDown(self): ctxt = context.get_admin_context() fake_image.FakeImageService_reset() instances = db.instance_get_all(ctxt) for instance in instances: db.instance_destroy(ctxt, instance['uuid']) fake.restore_nodes() super(BaseTestCase, self).tearDown() def _fake_instance(self, updates): return fake_instance.fake_instance_obj(None, **updates) def _create_fake_instance_obj(self, params=None, type_name='m1.tiny', services=False, context=None): flavor = flavors.get_flavor_by_name(type_name) inst = objects.Instance(context=context or self.context) inst.vm_state = vm_states.ACTIVE inst.task_state = None inst.power_state = power_state.RUNNING inst.image_ref = FAKE_IMAGE_REF inst.reservation_id = 'r-fakeres' inst.user_id = self.user_id inst.project_id = self.project_id inst.host = self.compute.host inst.node = NODENAME inst.instance_type_id = flavor.id inst.ami_launch_index = 0 inst.memory_mb = 0 inst.vcpus = 0 inst.root_gb = 0 inst.ephemeral_gb = 0 inst.architecture = arch.X86_64 inst.os_type = 'Linux' inst.system_metadata = ( params and params.get('system_metadata', {}) or {}) inst.locked = False inst.created_at = timeutils.utcnow() inst.updated_at = timeutils.utcnow() inst.launched_at = timeutils.utcnow() inst.security_groups = objects.SecurityGroupList(objects=[]) inst.flavor = flavor inst.old_flavor = None inst.new_flavor = None if params: inst.update(params) if services: _create_service_entries(self.context.elevated(), [['fake_zone', [inst.host]]]) inst.create() return inst def _create_instance_type(self, params=None): """Create a test instance type.""" if not params: params = {} context = self.context.elevated() inst = {} inst['name'] = 'm1.small' inst['memory_mb'] = 1024 inst['vcpus'] = 1 inst['root_gb'] = 20 inst['ephemeral_gb'] = 10 inst['flavorid'] = '1' inst['swap'] = 2048 inst['rxtx_factor'] = 1 inst.update(params) return db.flavor_create(context, inst)['id'] def _create_group(self): values = {'name': 'testgroup', 'description': 'testgroup', 'user_id': self.user_id, 'project_id': self.project_id} return db.security_group_create(self.context, values) def _stub_migrate_server(self): def _fake_migrate_server(*args, **kwargs): pass self.stubs.Set(conductor_manager.ComputeTaskManager, 'migrate_server', _fake_migrate_server) def _init_aggregate_with_host(self, aggr, aggr_name, zone, host): if not aggr: aggr = self.api.create_aggregate(self.context, aggr_name, zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, host) return aggr class ComputeVolumeTestCase(BaseTestCase): def setUp(self): super(ComputeVolumeTestCase, self).setUp() self.fetched_attempts = 0 self.instance = { 'id': 'fake', 'uuid': uuids.instance, 'name': 'fake', 'root_device_name': '/dev/vda', } self.fake_volume = fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb', 'connection_info': jsonutils.dumps({})}) self.instance_object = objects.Instance._from_db_object( self.context, objects.Instance(), 
fake_instance.fake_db_instance()) self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw: {'id': uuids.volume_id, 'size': 4, 'attach_status': 'detached'}) self.stubs.Set(self.compute.driver, 'get_volume_connector', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'initialize_connection', lambda *a, **kw: {}) self.stubs.Set(self.compute.volume_api, 'terminate_connection', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'attach', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'detach', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'check_attach', lambda *a, **kw: None) self.stubs.Set(greenthread, 'sleep', lambda *a, **kw: None) def store_cinfo(context, *args, **kwargs): self.cinfo = jsonutils.loads(args[-1].get('connection_info')) return self.fake_volume self.stub_out('nova.db.block_device_mapping_create', store_cinfo) self.stub_out('nova.db.block_device_mapping_update', store_cinfo) def test_attach_volume_serial(self): fake_bdm = objects.BlockDeviceMapping(context=self.context, **self.fake_volume) with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata', return_value={})): instance = self._create_fake_instance_obj() self.compute.attach_volume(self.context, instance, bdm=fake_bdm) self.assertEqual(self.cinfo.get('serial'), uuids.volume_id) def test_attach_volume_raises(self): fake_bdm = objects.BlockDeviceMapping(**self.fake_volume) instance = self._create_fake_instance_obj() def fake_attach(*args, **kwargs): raise test.TestingException with test.nested( mock.patch.object(driver_block_device.DriverVolumeBlockDevice, 'attach'), mock.patch.object(cinder.API, 'unreserve_volume'), mock.patch.object(objects.BlockDeviceMapping, 'destroy') ) as (mock_attach, mock_unreserve, mock_destroy): mock_attach.side_effect = fake_attach self.assertRaises( test.TestingException, self.compute.attach_volume, self.context, instance, fake_bdm) self.assertTrue(mock_unreserve.called) self.assertTrue(mock_destroy.called) def test_detach_volume_api_raises(self): fake_bdm = objects.BlockDeviceMapping(**self.fake_volume) instance = self._create_fake_instance_obj() with test.nested( mock.patch.object(self.compute, '_driver_detach_volume'), mock.patch.object(self.compute.volume_api, 'detach'), mock.patch.object(objects.BlockDeviceMapping, 'get_by_volume_and_instance'), mock.patch.object(fake_bdm, 'destroy') ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy): mock_detach.side_effect = test.TestingException mock_get.return_value = fake_bdm self.assertRaises( test.TestingException, self.compute.detach_volume, self.context, 'fake', instance, 'fake_id') mock_internal_detach.assert_called_once_with(self.context, instance, fake_bdm, {}) self.assertTrue(mock_destroy.called) def test_await_block_device_created_too_slow(self): self.flags(block_device_allocate_retries=2) self.flags(block_device_allocate_retries_interval=0.1) def never_get(context, vol_id): return { 'status': 'creating', 'id': 'blah', } self.stubs.Set(self.compute.volume_api, 'get', never_get) self.assertRaises(exception.VolumeNotCreated, self.compute._await_block_device_map_created, self.context, '1') def test_await_block_device_created_failed(self): c = self.compute fake_result = {'status': 'error', 'id': 'blah'} with mock.patch.object(c.volume_api, 'get', return_value=fake_result) as fake_get: self.assertRaises(exception.VolumeNotCreated, c._await_block_device_map_created, self.context, '1') fake_get.assert_called_once_with(self.context, '1') def 
test_await_block_device_created_slow(self): c = self.compute self.flags(block_device_allocate_retries=4) self.flags(block_device_allocate_retries_interval=0.1) def slow_get(context, vol_id): if self.fetched_attempts < 2: self.fetched_attempts += 1 return { 'status': 'creating', 'id': 'blah', } return { 'status': 'available', 'id': 'blah', } self.stubs.Set(c.volume_api, 'get', slow_get) attempts = c._await_block_device_map_created(self.context, '1') self.assertEqual(attempts, 3) def test_await_block_device_created_retries_negative(self): c = self.compute self.flags(block_device_allocate_retries=-1) self.flags(block_device_allocate_retries_interval=0.1) def volume_get(context, vol_id): return { 'status': 'available', 'id': 'blah', } self.stubs.Set(c.volume_api, 'get', volume_get) attempts = c._await_block_device_map_created(self.context, '1') self.assertEqual(1, attempts) def test_await_block_device_created_retries_zero(self): c = self.compute self.flags(block_device_allocate_retries=0) self.flags(block_device_allocate_retries_interval=0.1) def volume_get(context, vol_id): return { 'status': 'available', 'id': 'blah', } self.stubs.Set(c.volume_api, 'get', volume_get) attempts = c._await_block_device_map_created(self.context, '1') self.assertEqual(1, attempts) def test_boot_volume_serial(self): with ( mock.patch.object(objects.BlockDeviceMapping, 'save') ) as mock_save: block_device_mapping = [ block_device.BlockDeviceDict({ 'id': 1, 'no_device': None, 'source_type': 'volume', 'destination_type': 'volume', 'snapshot_id': None, 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb', 'volume_size': 55, 'delete_on_termination': False, })] bdms = block_device_obj.block_device_make_list_from_dicts( self.context, block_device_mapping) prepped_bdm = self.compute._prep_block_device( self.context, self.instance_object, bdms) self.assertEqual(2, mock_save.call_count) volume_driver_bdm = prepped_bdm['block_device_mapping'][0] self.assertEqual(volume_driver_bdm['connection_info']['serial'], uuids.volume_id) def test_boot_volume_metadata(self, metadata=True): def volume_api_get(*args, **kwargs): if metadata: return { 'size': 1, 'volume_image_metadata': {'vol_test_key': 'vol_test_value', 'min_ram': u'128', 'min_disk': u'256', 'size': u'536870912' }, } else: return {} self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get) expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {}, 'size': 0, 'status': 'active'} block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': None, 'volume_id': uuids.volume_id, 'delete_on_termination': False, }] image_meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping) if metadata: self.assertEqual(image_meta['properties']['vol_test_key'], 'vol_test_value') self.assertEqual(128, image_meta['min_ram']) self.assertEqual(256, image_meta['min_disk']) self.assertEqual(units.Gi, image_meta['size']) else: self.assertEqual(expected_no_metadata, image_meta) # Test it with new-style BDMs block_device_mapping = [{ 'boot_index': 0, 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': uuids.volume_id, 'delete_on_termination': False, }] image_meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping, legacy_bdm=False) if metadata: self.assertEqual(image_meta['properties']['vol_test_key'], 'vol_test_value') self.assertEqual(128, image_meta['min_ram']) self.assertEqual(256, image_meta['min_disk']) self.assertEqual(units.Gi, image_meta['size']) 
else: self.assertEqual(expected_no_metadata, image_meta) def test_boot_volume_no_metadata(self): self.test_boot_volume_metadata(metadata=False) def test_boot_image_metadata(self, metadata=True): def image_api_get(*args, **kwargs): if metadata: return { 'properties': {'img_test_key': 'img_test_value'} } else: return {} self.stubs.Set(self.compute_api.image_api, 'get', image_api_get) block_device_mapping = [{ 'boot_index': 0, 'source_type': 'image', 'destination_type': 'local', 'image_id': "fake-image", 'delete_on_termination': True, }] image_meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping, legacy_bdm=False) if metadata: self.assertEqual('img_test_value', image_meta['properties']['img_test_key']) else: self.assertEqual(image_meta, {}) def test_boot_image_no_metadata(self): self.test_boot_image_metadata(metadata=False) def test_poll_bandwidth_usage_not_implemented(self): ctxt = context.get_admin_context() self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters') self.mox.StubOutWithMock(utils, 'last_completed_audit_period') self.mox.StubOutWithMock(time, 'time') self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host') # Following methods will be called utils.last_completed_audit_period().AndReturn((0, 0)) time.time().AndReturn(10) # Note - time called two more times from Log time.time().AndReturn(20) time.time().AndReturn(21) objects.InstanceList.get_by_host(ctxt, 'fake-mini', use_slave=True).AndReturn([]) self.compute.driver.get_all_bw_counters([]).AndRaise( NotImplementedError) self.mox.ReplayAll() self.flags(bandwidth_poll_interval=1) self.compute._poll_bandwidth_usage(ctxt) # A second call won't call the stubs again as the bandwidth # poll is now disabled self.compute._poll_bandwidth_usage(ctxt) self.mox.UnsetStubs() @mock.patch.object(objects.InstanceList, 'get_by_host') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host): fake_instance = mock.Mock(uuid=uuids.volume_instance) mock_get_by_host.return_value = [fake_instance] volume_bdm = mock.Mock(id=1, is_volume=True) not_volume_bdm = mock.Mock(id=2, is_volume=False) mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm] expected_host_bdms = [{'instance': fake_instance, 'instance_bdms': [volume_bdm]}] got_host_bdms = self.compute._get_host_volume_bdms('fake-context') mock_get_by_host.assert_called_once_with('fake-context', self.compute.host, use_slave=False) mock_get_by_inst.assert_called_once_with('fake-context', uuids.volume_instance, use_slave=False) self.assertEqual(expected_host_bdms, got_host_bdms) def test_poll_volume_usage_disabled(self): ctxt = 'MockContext' self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms') self.mox.StubOutWithMock(utils, 'last_completed_audit_period') # None of the mocks should be called. self.mox.ReplayAll() self.flags(volume_usage_poll_interval=0) self.compute._poll_volume_usage(ctxt) self.mox.UnsetStubs() def test_poll_volume_usage_returns_no_vols(self): ctxt = 'MockContext' self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms') self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage') # Following methods are called. 
self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) self.compute._poll_volume_usage(ctxt) self.mox.UnsetStubs() def test_poll_volume_usage_with_data(self): ctxt = 'MockContext' self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms') self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache') self.stubs.Set(self.compute.driver, 'get_all_volume_usage', lambda x, y: [3, 4]) # All the mocks are called self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([1, 2]) self.compute._update_volume_usage_cache(ctxt, [3, 4]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) self.compute._poll_volume_usage(ctxt) self.mox.UnsetStubs() def test_detach_volume_usage(self): # Test that detach volume update the volume usage cache table correctly instance = self._create_fake_instance_obj() bdm = objects.BlockDeviceMapping(context=self.context, id=1, device_name='/dev/vdb', connection_info='{}', instance_uuid=instance['uuid'], source_type='volume', destination_type='volume', no_device=False, disk_bus='foo', device_type='disk', volume_size=1, volume_id=uuids.volume_id) host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb', 'connection_info': '{}', 'instance_uuid': instance['uuid'], 'volume_id': uuids.volume_id} self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'get_by_volume_and_instance') self.mox.StubOutWithMock(self.compute.driver, 'block_stats') self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms') self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage') self.mox.StubOutWithMock(self.compute.driver, 'instance_exists') # The following methods will be called objects.BlockDeviceMapping.get_by_volume_and_instance( self.context, uuids.volume_id, instance.uuid).AndReturn( bdm.obj_clone()) self.compute.driver.block_stats(instance, 'vdb').\ AndReturn([1, 30, 1, 20, None]) self.compute._get_host_volume_bdms(self.context, use_slave=True).AndReturn( host_volume_bdms) self.compute.driver.get_all_volume_usage( self.context, host_volume_bdms).AndReturn( [{'volume': uuids.volume_id, 'rd_req': 1, 'rd_bytes': 10, 'wr_req': 1, 'wr_bytes': 5, 'instance': instance}]) self.compute.driver.instance_exists(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() def fake_get_volume_encryption_metadata(self, context, volume_id): return {} self.stubs.Set(cinder.API, 'get_volume_encryption_metadata', fake_get_volume_encryption_metadata) self.compute.attach_volume(self.context, instance, bdm) # Poll volume usage & then detach the volume. This will update the # total fields in the volume usage cache. 
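        # NOTE: illustrative sketch only, not part of the original test.
        # The volume usage cache keeps per-attachment counters in curr_*
        # columns and rolls them into the tot_* columns when the volume is
        # detached, so after one poll followed by a detach one would expect
        # roughly (values taken from the assertions below):
        #
        #     usage = db.vol_get_usage_by_time(self.context, 0)[0]
        #     usage['curr_reads']   # 0  -- reset when the volume detaches
        #     usage['tot_reads']    # 1  -- accumulated into the totals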
self.flags(volume_usage_poll_interval=10) self.compute._poll_volume_usage(self.context) # Check that a volume.usage and volume.attach notification was sent self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) self.compute.detach_volume(self.context, uuids.volume_id, instance) # Check that volume.attach, 2 volume.usage, and volume.detach # notifications were sent self.assertEqual(4, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('compute.instance.volume.attach', msg.event_type) msg = fake_notifier.NOTIFICATIONS[2] self.assertEqual('volume.usage', msg.event_type) payload = msg.payload self.assertEqual(instance['uuid'], payload['instance_id']) self.assertEqual('fake', payload['user_id']) self.assertEqual('fake', payload['tenant_id']) self.assertEqual(1, payload['reads']) self.assertEqual(30, payload['read_bytes']) self.assertEqual(1, payload['writes']) self.assertEqual(20, payload['write_bytes']) self.assertIsNone(payload['availability_zone']) msg = fake_notifier.NOTIFICATIONS[3] self.assertEqual('compute.instance.volume.detach', msg.event_type) # Check the database for the volume_usages = db.vol_get_usage_by_time(self.context, 0) self.assertEqual(1, len(volume_usages)) volume_usage = volume_usages[0] self.assertEqual(0, volume_usage['curr_reads']) self.assertEqual(0, volume_usage['curr_read_bytes']) self.assertEqual(0, volume_usage['curr_writes']) self.assertEqual(0, volume_usage['curr_write_bytes']) self.assertEqual(1, volume_usage['tot_reads']) self.assertEqual(30, volume_usage['tot_read_bytes']) self.assertEqual(1, volume_usage['tot_writes']) self.assertEqual(20, volume_usage['tot_write_bytes']) def test_prepare_image_mapping(self): swap_size = 1 ephemeral_size = 1 instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size} mappings = [ {'virtual': 'ami', 'device': 'sda1'}, {'virtual': 'root', 'device': '/dev/sda1'}, {'virtual': 'swap', 'device': 'sdb4'}, {'virtual': 'ephemeral0', 'device': 'sdc1'}, {'virtual': 'ephemeral1', 'device': 'sdc2'}, ] preped_bdm = self.compute_api._prepare_image_mapping( instance_type, mappings) expected_result = [ { 'device_name': '/dev/sdb4', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': 'swap', 'boot_index': -1, 'volume_size': swap_size }, { 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': CONF.default_ephemeral_format, 'boot_index': -1, 'volume_size': ephemeral_size }, { 'device_name': '/dev/sdc2', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': CONF.default_ephemeral_format, 'boot_index': -1, 'volume_size': ephemeral_size } ] for expected, got in zip(expected_result, preped_bdm): self.assertThat(expected, matchers.IsSubDictOf(got)) def test_validate_bdm(self): def fake_get(self, context, res_id): return {'id': res_id, 'size': 4} def fake_check_attach(*args, **kwargs): pass self.stubs.Set(cinder.API, 'get', fake_get) self.stubs.Set(cinder.API, 'get_snapshot', fake_get) self.stubs.Set(cinder.API, 'check_attach', fake_check_attach) volume_id = '55555555-aaaa-bbbb-cccc-555555555555' snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555' image_id = '77777777-aaaa-bbbb-cccc-555555555555' instance = self._create_fake_instance_obj() instance_type = {'swap': 1, 'ephemeral_gb': 2} mappings = [ fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sdb4', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': 'swap', 
'boot_index': -1, 'volume_size': 1 }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'device_type': 'disk', 'volume_id': volume_id, 'guest_format': None, 'boot_index': 1, }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': snapshot_id, 'device_type': 'disk', 'guest_format': None, 'volume_size': 6, 'boot_index': 0, }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda3', 'source_type': 'image', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': None, 'boot_index': 2, 'volume_size': 1 }, anon=True) ] mappings = block_device_obj.block_device_make_list_from_dicts( self.context, mappings) # Make sure it passes at first self.compute_api._validate_bdm(self.context, instance, instance_type, mappings) self.assertEqual(4, mappings[1].volume_size) self.assertEqual(6, mappings[2].volume_size) # Boot sequence mappings[2].boot_index = 2 self.assertRaises(exception.InvalidBDMBootSequence, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings) mappings[2].boot_index = 0 # number of local block_devices self.flags(max_local_block_devices=1) self.assertRaises(exception.InvalidBDMLocalsLimit, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings) ephemerals = [ fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1, 'volume_size': 1 }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/vdc', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1, 'volume_size': 1 }, anon=True) ] ephemerals = block_device_obj.block_device_make_list_from_dicts( self.context, ephemerals) self.flags(max_local_block_devices=4) # More ephemerals are OK as long as they are not over the size limit mappings_ = mappings[:] mappings_.objects.extend(ephemerals) self.compute_api._validate_bdm(self.context, instance, instance_type, mappings_) # Ephemerals over the size limit ephemerals[0].volume_size = 3 mappings_ = mappings[:] mappings_.objects.extend(ephemerals) self.assertRaises(exception.InvalidBDMEphemeralSize, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings_) # Swap over the size limit mappings[0].volume_size = 3 self.assertRaises(exception.InvalidBDMSwapSize, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings) mappings[0].volume_size = 1 additional_swap = [ fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': 'swap', 'boot_index': -1, 'volume_size': 1 }, anon=True) ] additional_swap = block_device_obj.block_device_make_list_from_dicts( self.context, additional_swap) # More than one swap mappings_ = mappings[:] mappings_.objects.extend(additional_swap) self.assertRaises(exception.InvalidBDMFormat, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings_) image_no_size = [ fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda4', 'source_type': 'image', 'image_id': image_id, 'destination_type': 'volume', 'boot_index': -1, 'volume_size': None, }, anon=True) ] image_no_size = block_device_obj.block_device_make_list_from_dicts( self.context, 
image_no_size) mappings_ = mappings[:] mappings_.objects.extend(image_no_size) self.assertRaises(exception.InvalidBDM, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings_) # blank device without a specified size fails blank_no_size = [ fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda4', 'source_type': 'blank', 'destination_type': 'volume', 'boot_index': -1, 'volume_size': None, }, anon=True) ] blank_no_size = block_device_obj.block_device_make_list_from_dicts( self.context, blank_no_size) mappings_ = mappings[:] mappings_.objects.extend(blank_no_size) self.assertRaises(exception.InvalidBDM, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings_) def test_validate_bdm_media_service_exceptions(self): instance_type = {'swap': 1, 'ephemeral_gb': 1} bdms = [fake_block_device.FakeDbBlockDeviceDict({ 'id': 1, 'no_device': None, 'source_type': 'volume', 'destination_type': 'volume', 'snapshot_id': None, 'volume_id': uuids.volume_id, 'device_name': 'vda', 'boot_index': 0, 'delete_on_termination': False}, anon=True)] bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms) # First we test a list of invalid status values that should result # in an InvalidVolume exception being raised. status_values = ( # First two check that the status is 'available'. ('creating', 'detached'), ('error', 'detached'), # Checks that the attach_status is 'detached'. ('available', 'attached') ) for status, attach_status in status_values: if attach_status == 'attached': def fake_volume_get(self, ctxt, volume_id): return {'id': volume_id, 'status': status, 'attach_status': attach_status, 'multiattach': False, 'attachments': {}} else: def fake_volume_get(self, ctxt, volume_id): return {'id': volume_id, 'status': status, 'attach_status': attach_status, 'multiattach': False} self.stubs.Set(cinder.API, 'get', fake_volume_get) self.assertRaises(exception.InvalidVolume, self.compute_api._validate_bdm, self.context, self.instance, instance_type, bdms) # Now we test a 404 case that results in InvalidBDMVolume. 
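        # NOTE: illustrative summary only, not part of the original test.
        # The two failure modes exercised here map to different exceptions:
        #
        #     volume exists but has a bad status/attach_status
        #         -> exception.InvalidVolume
        #     cinder.API.get() raises VolumeNotFound
        #         -> exception.InvalidBDMVolume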
def fake_volume_get_not_found(self, context, volume_id): raise exception.VolumeNotFound(volume_id) self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found) self.assertRaises(exception.InvalidBDMVolume, self.compute_api._validate_bdm, self.context, self.instance, instance_type, bdms) # Check that the volume status is 'available' and attach_status is # 'detached' and accept the request if so def fake_volume_get_ok(self, context, volume_id): return {'id': volume_id, 'status': 'available', 'attach_status': 'detached', 'multiattach': False} self.stubs.Set(cinder.API, 'get', fake_volume_get_ok) self.compute_api._validate_bdm(self.context, self.instance, instance_type, bdms) def test_volume_snapshot_create(self): self.assertRaises(messaging.ExpectedException, self.compute.volume_snapshot_create, self.context, self.instance_object, 'fake_id', {}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.volume_snapshot_create, self.context, self.instance_object, 'fake_id', {}) def test_volume_snapshot_delete(self): self.assertRaises(messaging.ExpectedException, self.compute.volume_snapshot_delete, self.context, self.instance_object, 'fake_id', 'fake_id2', {}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.volume_snapshot_delete, self.context, self.instance_object, 'fake_id', 'fake_id2', {}) @mock.patch.object(cinder.API, 'create', side_effect=exception.OverQuota(overs='volumes')) def test_prep_block_device_over_quota_failure(self, mock_create): instance = self._create_fake_instance_obj() bdms = [ block_device.BlockDeviceDict({ 'boot_index': 0, 'guest_format': None, 'connection_info': None, 'device_type': u'disk', 'source_type': 'image', 'destination_type': 'volume', 'volume_size': 1, 'image_id': 1, 'device_name': '/dev/vdb', })] bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms) self.assertRaises(exception.VolumeLimitExceeded, compute_manager.ComputeManager()._prep_block_device, self.context, instance, bdms) self.assertTrue(mock_create.called) @mock.patch.object(nova.virt.block_device, 'get_swap') @mock.patch.object(nova.virt.block_device, 'convert_blanks') @mock.patch.object(nova.virt.block_device, 'convert_images') @mock.patch.object(nova.virt.block_device, 'convert_snapshots') @mock.patch.object(nova.virt.block_device, 'convert_volumes') @mock.patch.object(nova.virt.block_device, 'convert_ephemerals') @mock.patch.object(nova.virt.block_device, 'convert_swap') @mock.patch.object(nova.virt.block_device, 'attach_block_devices') def test_prep_block_device_with_blanks(self, attach_block_devices, convert_swap, convert_ephemerals, convert_volumes, convert_snapshots, convert_images, convert_blanks, get_swap): instance = self._create_fake_instance_obj() instance['root_device_name'] = '/dev/vda' root_volume = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'instance_uuid': uuids.block_device_instance, 'source_type': 'image', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'volume_size': 1, 'boot_index': 0})) blank_volume1 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'volume_size': 1, 'boot_index': 1})) blank_volume2 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'volume_size': 1, 
'boot_index': 2})) bdms = [blank_volume1, blank_volume2, root_volume] def fake_attach_block_devices(bdm, *args, **kwargs): return bdm convert_swap.return_value = [] convert_ephemerals.return_value = [] convert_volumes.return_value = [blank_volume1, blank_volume2] convert_snapshots.return_value = [] convert_images.return_value = [root_volume] convert_blanks.return_value = [] attach_block_devices.side_effect = fake_attach_block_devices get_swap.return_value = [] expected_block_device_info = { 'root_device_name': '/dev/vda', 'swap': [], 'ephemerals': [], 'block_device_mapping': bdms } manager = compute_manager.ComputeManager() manager.use_legacy_block_device_info = False block_device_info = manager._prep_block_device(self.context, instance, bdms) convert_swap.assert_called_once_with(bdms) convert_ephemerals.assert_called_once_with(bdms) bdm_args = tuple(bdms) convert_volumes.assert_called_once_with(bdm_args) convert_snapshots.assert_called_once_with(bdm_args) convert_images.assert_called_once_with(bdm_args) convert_blanks.assert_called_once_with(bdm_args) self.assertEqual(expected_block_device_info, block_device_info) self.assertEqual(1, attach_block_devices.call_count) get_swap.assert_called_once_with([]) class ComputeTestCase(BaseTestCase): def setUp(self): super(ComputeTestCase, self).setUp() self.useFixture(fixtures.SpawnIsSynchronousFixture()) def test_wrap_instance_fault(self): inst = {"uuid": uuids.instance} called = {'fault_added': False} def did_it_add_fault(*args): called['fault_added'] = True self.stubs.Set(compute_utils, 'add_instance_fault_from_exc', did_it_add_fault) @compute_manager.wrap_instance_fault def failer(self2, context, instance): raise NotImplementedError() self.assertRaises(NotImplementedError, failer, self.compute, self.context, instance=inst) self.assertTrue(called['fault_added']) def test_wrap_instance_fault_instance_in_args(self): inst = {"uuid": uuids.instance} called = {'fault_added': False} def did_it_add_fault(*args): called['fault_added'] = True self.stubs.Set(compute_utils, 'add_instance_fault_from_exc', did_it_add_fault) @compute_manager.wrap_instance_fault def failer(self2, context, instance): raise NotImplementedError() self.assertRaises(NotImplementedError, failer, self.compute, self.context, inst) self.assertTrue(called['fault_added']) def test_wrap_instance_fault_no_instance(self): inst = {"uuid": uuids.instance} called = {'fault_added': False} def did_it_add_fault(*args): called['fault_added'] = True self.stubs.Set(compute_utils, 'add_instance_fault_from_exc', did_it_add_fault) @compute_manager.wrap_instance_fault def failer(self2, context, instance): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.assertRaises(exception.InstanceNotFound, failer, self.compute, self.context, inst) self.assertFalse(called['fault_added']) @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') def test_wrap_instance_event(self, mock_finish, mock_start): inst = {"uuid": uuids.instance} @compute_manager.wrap_instance_event def fake_event(self, context, instance): pass fake_event(self.compute, self.context, instance=inst) self.assertTrue(mock_start.called) self.assertTrue(mock_finish.called) @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') def test_wrap_instance_event_return(self, mock_finish, mock_start): inst = {"uuid": uuids.instance} @compute_manager.wrap_instance_event def 
fake_event(self, context, instance): return True retval = fake_event(self.compute, self.context, instance=inst) self.assertTrue(retval) self.assertTrue(mock_start.called) self.assertTrue(mock_finish.called) @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') def test_wrap_instance_event_log_exception(self, mock_finish, mock_start): inst = {"uuid": uuids.instance} @compute_manager.wrap_instance_event def fake_event(self2, context, instance): raise exception.NovaException() self.assertRaises(exception.NovaException, fake_event, self.compute, self.context, instance=inst) self.assertTrue(mock_start.called) self.assertTrue(mock_finish.called) args, kwargs = mock_finish.call_args self.assertIsInstance(kwargs['exc_val'], exception.NovaException) def test_object_compat(self): db_inst = fake_instance.fake_db_instance() @compute_manager.object_compat def test_fn(_self, context, instance): self.assertIsInstance(instance, objects.Instance) self.assertEqual(instance.uuid, db_inst['uuid']) self.assertEqual(instance.metadata, db_inst['metadata']) self.assertEqual(instance.system_metadata, db_inst['system_metadata']) test_fn(None, self.context, instance=db_inst) def test_object_compat_no_metas(self): # Tests that we don't try to set metadata/system_metadata on the # instance object using fields that aren't in the db object. db_inst = fake_instance.fake_db_instance() db_inst.pop('metadata', None) db_inst.pop('system_metadata', None) @compute_manager.object_compat def test_fn(_self, context, instance): self.assertIsInstance(instance, objects.Instance) self.assertEqual(instance.uuid, db_inst['uuid']) self.assertNotIn('metadata', instance) self.assertNotIn('system_metadata', instance) test_fn(None, self.context, instance=db_inst) def test_object_compat_more_positional_args(self): db_inst = fake_instance.fake_db_instance() @compute_manager.object_compat def test_fn(_self, context, instance, pos_arg_1, pos_arg_2): self.assertIsInstance(instance, objects.Instance) self.assertEqual(instance.uuid, db_inst['uuid']) self.assertEqual(instance.metadata, db_inst['metadata']) self.assertEqual(instance.system_metadata, db_inst['system_metadata']) self.assertEqual(pos_arg_1, 'fake_pos_arg1') self.assertEqual(pos_arg_2, 'fake_pos_arg2') test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2') def test_create_instance_with_img_ref_associates_config_drive(self): # Make sure create associates a config drive. instance = self._create_fake_instance_obj( params={'config_drive': '1234', }) try: self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) instance = instances[0] self.assertTrue(instance['config_drive']) finally: db.instance_destroy(self.context, instance['uuid']) def test_create_instance_associates_config_drive(self): # Make sure create associates a config drive. instance = self._create_fake_instance_obj( params={'config_drive': '1234', }) try: self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) instance = instances[0] self.assertTrue(instance['config_drive']) finally: db.instance_destroy(self.context, instance['uuid']) def test_create_instance_unlimited_memory(self): # Default of memory limit=None is unlimited. 
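        # NOTE: illustrative sketch only, not part of the original test.
        # The scheduler hands resource caps to the compute manager through
        # filter_properties; a value of None means "no limit", e.g.:
        #
        #     {'limits': {'memory_mb': None}}    # unlimited RAM claims
        #     {'limits': {'memory_mb': 4096}}    # claims capped at 4096 MB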
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) params = {"memory_mb": 999999999999} filter_properties = {'limits': {'memory_mb': None}} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used']) def test_create_instance_unlimited_disk(self): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) params = {"root_gb": 999999999999, "ephemeral_gb": 99999999999} filter_properties = {'limits': {'disk_gb': None}} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) def test_create_multiple_instances_then_starve(self): self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) limits = {'memory_mb': 4096, 'disk_gb': 1000} params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(1024, self.rt.compute_node['memory_mb_used']) self.assertEqual(256, self.rt.compute_node['local_gb_used']) params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(3072, self.rt.compute_node['memory_mb_used']) self.assertEqual(768, self.rt.compute_node['local_gb_used']) params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(3072, self.rt.compute_node['memory_mb_used']) self.assertEqual(768, self.rt.compute_node['local_gb_used']) def test_create_multiple_instance_with_neutron_port(self): instance_type = flavors.get_default_flavor() def fake_is_neutron(): return True self.stubs.Set(utils, 'is_neutron', fake_is_neutron) requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(port_id=uuids.port_instance)]) self.assertRaises(exception.MultiplePortsNotApplicable, self.compute_api.create, self.context, instance_type=instance_type, image_href=None, max_count=2, requested_networks=requested_networks) def test_create_instance_with_oversubscribed_ram(self): # Test passing of oversubscribed ram policy from the scheduler. 
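        # NOTE: worked example only, not part of the original test.  If the
        # fake virt driver reported (assumed value) total_mem_mb = 2048, the
        # quantities computed below would be:
        #
        #     oversub_limit_mb = 2048 * 1.5       = 3072  # scheduler limit
        #     instance_mb      = int(2048 * 1.45) = 2969  # requested RAM
        #
        # i.e. the request exceeds physical RAM but stays under the
        # oversubscription limit, so the build is expected to succeed.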
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) # get total memory as reported by virt driver: resources = self.compute.driver.get_available_resource(NODENAME) total_mem_mb = resources['memory_mb'] oversub_limit_mb = total_mem_mb * 1.5 instance_mb = int(total_mem_mb * 1.45) # build an instance, specifying an amount of memory that exceeds # total_mem_mb, but is less than the oversubscribed limit: params = {"memory_mb": instance_mb, "root_gb": 128, "ephemeral_gb": 128} instance = self._create_fake_instance_obj(params) limits = {'memory_mb': oversub_limit_mb} filter_properties = {'limits': limits} self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used']) def test_create_instance_with_oversubscribed_ram_fail(self): """Test passing of oversubscribed ram policy from the scheduler, but with insufficient memory. """ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) # get total memory as reported by virt driver: resources = self.compute.driver.get_available_resource(NODENAME) total_mem_mb = resources['memory_mb'] oversub_limit_mb = total_mem_mb * 1.5 instance_mb = int(total_mem_mb * 1.55) # build an instance, specifying an amount of memory that exceeds # both total_mem_mb and the oversubscribed limit: params = {"memory_mb": instance_mb, "root_gb": 128, "ephemeral_gb": 128} instance = self._create_fake_instance_obj(params) filter_properties = {'limits': {'memory_mb': oversub_limit_mb}} self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) def test_create_instance_with_oversubscribed_cpu(self): # Test passing of oversubscribed cpu policy from the scheduler. 
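        # NOTE: illustrative sketch only, not part of the original test.
        # The fake virt driver reports a single physical vCPU, but the
        # scheduler limit of {'vcpu': 3} lets the resource tracker accept
        # claims up to three vCPUs in total:
        #
        #     build (2 vCPUs) -> vcpus_used == 2
        #     build (1 vCPU)  -> vcpus_used == 3
        #     delete one      -> vcpus_used == 2
        #
        # A further 2-vCPU claim against the same limit of 3 is expected to
        # fail and leave that instance in the ERROR state.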
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) limits = {'vcpu': 3} filter_properties = {'limits': limits} # get total memory as reported by virt driver: resources = self.compute.driver.get_available_resource(NODENAME) self.assertEqual(1, resources['vcpus']) # build an instance, specifying an amount of memory that exceeds # total_mem_mb, but is less than the oversubscribed limit: params = {"memory_mb": 10, "root_gb": 1, "ephemeral_gb": 1, "vcpus": 2} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) self.assertEqual(2, self.rt.compute_node['vcpus_used']) # create one more instance: params = {"memory_mb": 10, "root_gb": 1, "ephemeral_gb": 1, "vcpus": 1} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) self.assertEqual(3, self.rt.compute_node['vcpus_used']) # delete the instance: instance['vm_state'] = vm_states.DELETED self.rt.update_usage(self.context, instance=instance) self.assertEqual(2, self.rt.compute_node['vcpus_used']) # now oversubscribe vcpus and fail: params = {"memory_mb": 10, "root_gb": 1, "ephemeral_gb": 1, "vcpus": 2} instance = self._create_fake_instance_obj(params) limits = {'vcpu': 3} self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_create_instance_with_oversubscribed_disk(self): # Test passing of oversubscribed disk policy from the scheduler. self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) # get total memory as reported by virt driver: resources = self.compute.driver.get_available_resource(NODENAME) total_disk_gb = resources['local_gb'] oversub_limit_gb = total_disk_gb * 1.5 instance_gb = int(total_disk_gb * 1.45) # build an instance, specifying an amount of disk that exceeds # total_disk_gb, but is less than the oversubscribed limit: params = {"root_gb": instance_gb, "memory_mb": 10} instance = self._create_fake_instance_obj(params) limits = {'disk_gb': oversub_limit_gb} filter_properties = {'limits': limits} self.compute.build_and_run_instance(self.context, instance, {}, {}, filter_properties, block_device_mapping=[]) self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used']) def test_create_instance_with_oversubscribed_disk_fail(self): """Test passing of oversubscribed disk policy from the scheduler, but with insufficient disk. 
""" self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.rt.update_available_resource(self.context.elevated()) # get total memory as reported by virt driver: resources = self.compute.driver.get_available_resource(NODENAME) total_disk_gb = resources['local_gb'] oversub_limit_gb = total_disk_gb * 1.5 instance_gb = int(total_disk_gb * 1.55) # build an instance, specifying an amount of disk that exceeds # total_disk_gb, but is less than the oversubscribed limit: params = {"root_gb": instance_gb, "memory_mb": 10} instance = self._create_fake_instance_obj(params) limits = {'disk_gb': oversub_limit_gb} self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[], limits=limits) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_create_instance_without_node_param(self): instance = self._create_fake_instance_obj({'node': None}) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) instance = instances[0] self.assertEqual(NODENAME, instance['node']) def test_create_instance_no_image(self): # Create instance with no image provided. params = {'image_ref': ''} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self._assert_state({'vm_state': vm_states.ACTIVE, 'task_state': None}) @testtools.skipIf(test_utils.is_osx(), 'IPv6 pretty-printing broken on OSX, see bug 1409135') def test_default_access_ip(self): self.flags(default_access_ip_network_name='test1') fake_network.unset_stub_network_methods(self) instance = self._create_fake_instance_obj() orig_update = self.compute._instance_update # Make sure the access_ip_* updates happen in the same DB # update as the set to ACTIVE. def _instance_update(ctxt, instance_uuid, **kwargs): if kwargs.get('vm_state', None) == vm_states.ACTIVE: self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100') self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1') return orig_update(ctxt, instance_uuid, **kwargs) self.stubs.Set(self.compute, '_instance_update', _instance_update) try: self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) instance = instances[0] self.assertEqual(instance['access_ip_v4'], '192.168.1.100') self.assertEqual(instance['access_ip_v6'], '2001:db8:0:1:dcad:beff:feef:1') finally: db.instance_destroy(self.context, instance['uuid']) def test_no_default_access_ip(self): instance = self._create_fake_instance_obj() try: self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) instance = instances[0] self.assertFalse(instance['access_ip_v4']) self.assertFalse(instance['access_ip_v6']) finally: db.instance_destroy(self.context, instance['uuid']) def test_fail_to_schedule_persists(self): # check the persistence of the ERROR(scheduling) state. params = {'vm_state': vm_states.ERROR, 'task_state': task_states.SCHEDULING} self._create_fake_instance_obj(params=params) # check state is failed even after the periodic poll self.compute.periodic_tasks(context.get_admin_context()) self._assert_state({'vm_state': vm_states.ERROR, 'task_state': task_states.SCHEDULING}) def test_run_instance_setup_block_device_mapping_fail(self): """block device mapping failure test. 
Make sure that when there is a block device mapping problem, the instance goes to ERROR state, cleaning the task state """ def fake(*args, **kwargs): raise exception.InvalidBDM() self.stubs.Set(nova.compute.manager.ComputeManager, '_prep_block_device', fake) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance( self.context, instance=instance, image={}, request_spec={}, block_device_mapping=[], filter_properties={}, requested_networks=[], injected_files=None, admin_password=None, node=None) # check state is failed even after the periodic poll self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) self.compute.periodic_tasks(context.get_admin_context()) self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) @mock.patch('nova.compute.manager.ComputeManager._prep_block_device', side_effect=exception.OverQuota(overs='volumes')) def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev): """block device mapping over quota failure test. Make sure when we're over volume quota according to Cinder client, the appropriate exception is raised and the instances to ERROR state, cleaning the task state. """ instance = self._create_fake_instance_obj() self.compute.build_and_run_instance( self.context, instance=instance, request_spec={}, filter_properties={}, requested_networks=[], injected_files=None, admin_password=None, node=None, block_device_mapping=[], image={}) # check state is failed even after the periodic poll self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) self.compute.periodic_tasks(context.get_admin_context()) self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) self.assertTrue(mock_prep_block_dev.called) def test_run_instance_spawn_fail(self): """spawn failure test. Make sure that when there is a spawning problem, the instance goes to ERROR state, cleaning the task state. """ def fake(*args, **kwargs): raise test.TestingException() self.stubs.Set(self.compute.driver, 'spawn', fake) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance( self.context, instance=instance, request_spec={}, filter_properties={}, requested_networks=[], injected_files=None, admin_password=None, block_device_mapping=[], image={}, node=None) # check state is failed even after the periodic poll self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) self.compute.periodic_tasks(context.get_admin_context()) self._assert_state({'vm_state': vm_states.ERROR, 'task_state': None}) def test_run_instance_dealloc_network_instance_not_found(self): """spawn network deallocate test. 
Make sure that when an instance is not found during spawn that the network is deallocated """ instance = self._create_fake_instance_obj() def fake(*args, **kwargs): raise exception.InstanceNotFound(instance_id="fake") self.stubs.Set(self.compute.driver, 'spawn', fake) self.mox.StubOutWithMock(self.compute, '_deallocate_network') self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) def test_run_instance_bails_on_missing_instance(self): # Make sure that run_instance() will quickly ignore a deleted instance instance = self._create_fake_instance_obj() with mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = exception.InstanceNotFound(instance_id=1) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(mock_save.called) def test_run_instance_bails_on_deleting_instance(self): # Make sure that run_instance() will quickly ignore a deleting instance instance = self._create_fake_instance_obj() with mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = exception.UnexpectedDeletingTaskStateError( instance_uuid=instance['uuid'], expected={'task_state': 'bar'}, actual={'task_state': 'foo'}) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(mock_save.called) def test_can_terminate_on_error_state(self): # Make sure that the instance can be terminated in ERROR state. # check failed to schedule --> terminate params = {'vm_state': vm_states.ERROR} instance = self._create_fake_instance_obj(params=params) self.compute.terminate_instance(self.context, instance, [], []) self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, self.context, instance['uuid']) # Double check it's not there for admins, either. self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, self.context.elevated(), instance['uuid']) def test_run_terminate(self): # Make sure it is possible to run and terminate instance. 
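        # NOTE: illustrative sketch only, not part of the original test.
        # Terminating an instance soft-deletes its database row: it no
        # longer shows up in a normal listing, but it can still be read
        # back with a read_deleted="only" admin context, e.g.:
        #
        #     ctxt = context.get_admin_context(read_deleted="only")
        #     deleted = db.instance_get_by_uuid(ctxt, instance['uuid'])
        #     # deleted['vm_state'] == vm_states.DELETED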
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        self.compute.terminate_instance(self.context, instance, [], [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

        admin_deleted_context = context.get_admin_context(
            read_deleted="only")
        instance = db.instance_get_by_uuid(admin_deleted_context,
                                           instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.DELETED)
        self.assertIsNone(instance['task_state'])

    def test_run_terminate_with_vol_attached(self):
        """Make sure it is possible to run and terminate an instance with a
        volume attached
        """
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        def fake_check_attach(*args, **kwargs):
            pass

        def fake_reserve_volume(*args, **kwargs):
            pass

        def fake_volume_get(self, context, volume_id):
            return {'id': volume_id,
                    'attach_status': 'attached',
                    'attachments': {instance.uuid: {
                                        'attachment_id': 'abc123'
                                        }
                                    }
                    }

        def fake_terminate_connection(self, context, volume_id, connector):
            pass

        def fake_detach(self, context, volume_id, instance_uuid):
            pass

        bdms = []

        def fake_rpc_reserve_block_device_name(self, context, instance,
                                               device, volume_id, **kwargs):
            bdm = objects.BlockDeviceMapping(
                        **{'context': context,
                           'source_type': 'volume',
                           'destination_type': 'volume',
                           'volume_id': uuids.volume_id,
                           'instance_uuid': instance['uuid'],
                           'device_name': '/dev/vdc'})
            bdm.create()
            bdms.append(bdm)
            return bdm

        self.stubs.Set(cinder.API, 'get', fake_volume_get)
        self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
        self.stubs.Set(cinder.API, 'reserve_volume', fake_reserve_volume)
        self.stubs.Set(cinder.API, 'terminate_connection',
                       fake_terminate_connection)
        self.stubs.Set(cinder.API, 'detach', fake_detach)
        self.stubs.Set(compute_rpcapi.ComputeAPI,
                       'reserve_block_device_name',
                       fake_rpc_reserve_block_device_name)

        self.compute_api.attach_volume(self.context, instance, 1,
                                       '/dev/vdc')

        self.compute.terminate_instance(self.context, instance, bdms, [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)
        bdms = db.block_device_mapping_get_all_by_instance(self.context,
                                                           instance['uuid'])
        self.assertEqual(len(bdms), 0)

    def test_run_terminate_no_image(self):
        """Make sure an instance started without an image (from volume)
        can be terminated without issues
        """
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        self._assert_state({'vm_state': vm_states.ACTIVE,
                            'task_state': None})

        self.compute.terminate_instance(self.context, instance, [], [])
        instances = db.instance_get_all(self.context)
        self.assertEqual(len(instances), 0)

    def test_terminate_no_network(self):
        # This is as reported in LP bug 1008875
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)
        self.mox.ReplayAll()
self.compute.terminate_instance(self.context, instance, [], []) instances = db.instance_get_all(self.context) LOG.info("After terminating instances: %s", instances) self.assertEqual(len(instances), 0) def test_run_terminate_timestamps(self): # Make sure timestamps are set for launched and destroyed. instance = self._create_fake_instance_obj() instance['launched_at'] = None self.assertIsNone(instance['launched_at']) self.assertIsNone(instance['deleted_at']) launch = timeutils.utcnow() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.refresh() self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch) self.assertIsNone(instance['deleted_at']) terminate = timeutils.utcnow() self.compute.terminate_instance(self.context, instance, [], []) with utils.temporary_mutation(self.context, read_deleted='only'): instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertTrue(instance['launched_at'].replace( tzinfo=None) < terminate) self.assertTrue(instance['deleted_at'].replace( tzinfo=None) > terminate) def test_run_terminate_deallocate_net_failure_sets_error_state(self): instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) LOG.info("Running instances: %s", instances) self.assertEqual(len(instances), 1) def _fake_deallocate_network(*args, **kwargs): raise test.TestingException() self.stubs.Set(self.compute, '_deallocate_network', _fake_deallocate_network) self.assertRaises(test.TestingException, self.compute.terminate_instance, self.context, instance, [], []) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertEqual(instance['vm_state'], vm_states.ERROR) def test_stop(self): # Ensure instance can be stopped. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF}) inst_uuid = instance['uuid'] extra = ['system_metadata', 'metadata'] inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) self.compute.stop_instance(self.context, instance=inst_obj, clean_shutdown=True) self.compute.terminate_instance(self.context, instance, [], []) def test_start(self): # Ensure instance can be started. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF}) extra = ['system_metadata', 'metadata'] inst_uuid = instance['uuid'] inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) self.compute.stop_instance(self.context, instance=inst_obj, clean_shutdown=True) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) self.compute.terminate_instance(self.context, instance, [], []) def test_start_shelved_instance(self): # Ensure shelved instance can be started. self.deleted_image_id = None def fake_delete(self_, ctxt, image_id): self.deleted_image_id = image_id fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete) instance = self._create_fake_instance_obj() image = {'id': 'fake_id'} # Adding shelved information to instance system metadata. 
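        # NOTE: illustrative sketch only, not part of the original test.
        # A shelved instance carries these system_metadata keys; starting it
        # again is expected to delete the shelved image and strip all three:
        #
        #     instance.system_metadata['shelved_at']        # ISO timestamp
        #     instance.system_metadata['shelved_image_id']  # snapshot image
        #     instance.system_metadata['shelved_host']      # e.g. 'fake-mini'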
shelved_time = timeutils.utcnow().isoformat() instance.system_metadata['shelved_at'] = shelved_time instance.system_metadata['shelved_image_id'] = image['id'] instance.system_metadata['shelved_host'] = 'fake-mini' instance.save() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF, "vm_state": vm_states.SHELVED}) extra = ['system_metadata', 'metadata'] inst_uuid = instance['uuid'] inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) self.compute.stop_instance(self.context, instance=inst_obj, clean_shutdown=True) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) self.assertEqual(image['id'], self.deleted_image_id) self.assertNotIn('shelved_at', inst_obj.system_metadata) self.assertNotIn('shelved_image_id', inst_obj.system_metadata) self.assertNotIn('shelved_host', inst_obj.system_metadata) self.compute.terminate_instance(self.context, instance, [], []) def test_stop_start_no_image(self): params = {'image_ref': ''} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF}) extra = ['system_metadata', 'metadata'] inst_uuid = instance['uuid'] inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) self.compute.stop_instance(self.context, instance=inst_obj, clean_shutdown=True) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) self.compute.terminate_instance(self.context, instance, [], []) def test_rescue(self): # Ensure instance can be rescued and unrescued. called = {'rescued': False, 'unrescued': False} def fake_rescue(self, context, instance_ref, network_info, image_meta, rescue_password): called['rescued'] = True self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue) def fake_unrescue(self, instance_ref, network_info): called['unrescued'] = True self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue', fake_unrescue) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.task_state = task_states.RESCUING instance.save() self.compute.rescue_instance(self.context, instance, None, None, True) self.assertTrue(called['rescued']) instance.task_state = task_states.UNRESCUING instance.save() self.compute.unrescue_instance(self.context, instance) self.assertTrue(called['unrescued']) self.compute.terminate_instance(self.context, instance, [], []) def test_rescue_notifications(self): # Ensure notifications on instance rescue. 
def fake_rescue(self, context, instance_ref, network_info, image_meta, rescue_password): pass self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) fake_notifier.NOTIFICATIONS = [] instance.task_state = task_states.RESCUING instance.save() self.compute.rescue_instance(self.context, instance, None, True, True) expected_notifications = ['compute.instance.rescue.start', 'compute.instance.exists', 'compute.instance.rescue.end'] self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS], expected_notifications) for n, msg in enumerate(fake_notifier.NOTIFICATIONS): self.assertEqual(msg.event_type, expected_notifications[n]) self.assertEqual(msg.priority, 'INFO') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance.uuid) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) msg = fake_notifier.NOTIFICATIONS[0] self.assertIn('rescue_image_name', msg.payload) self.compute.terminate_instance(self.context, instance, [], []) def test_unrescue_notifications(self): # Ensure notifications on instance rescue. def fake_unrescue(self, instance_ref, network_info): pass self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue', fake_unrescue) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) fake_notifier.NOTIFICATIONS = [] instance.task_state = task_states.UNRESCUING instance.save() self.compute.unrescue_instance(self.context, instance) expected_notifications = ['compute.instance.unrescue.start', 'compute.instance.unrescue.end'] self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS], expected_notifications) for n, msg in enumerate(fake_notifier.NOTIFICATIONS): self.assertEqual(msg.event_type, expected_notifications[n]) self.assertEqual(msg.priority, 'INFO') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance.uuid) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_rescue_handle_err(self): # If the driver fails to rescue, instance state should got to ERROR # and the exception should be converted to InstanceNotRescuable inst_obj = self._create_fake_instance_obj() self.mox.StubOutWithMock(self.compute, '_get_rescue_image') self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue') self.compute._get_rescue_image( mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn( objects.ImageMeta.from_dict({})) nova.virt.fake.FakeDriver.rescue( 
mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password' ).AndRaise(RuntimeError("Try again later")) self.mox.ReplayAll() expected_message = ('Instance %s cannot be rescued: ' 'Driver Error: Try again later' % inst_obj.uuid) with testtools.ExpectedException( exception.InstanceNotRescuable, expected_message): self.compute.rescue_instance( self.context, instance=inst_obj, rescue_password='password', rescue_image_ref=None, clean_shutdown=True) self.assertEqual(vm_states.ERROR, inst_obj.vm_state) @mock.patch.object(image_api.API, "get") @mock.patch.object(nova.virt.fake.FakeDriver, "rescue") def test_rescue_with_image_specified(self, mock_rescue, mock_image_get): image_ref = uuids.image_instance rescue_image_meta = {} params = {"task_state": task_states.RESCUING} instance = self._create_fake_instance_obj(params=params) ctxt = context.get_admin_context() mock_context = mock.Mock() mock_context.elevated.return_value = ctxt mock_image_get.return_value = rescue_image_meta self.compute.rescue_instance(mock_context, instance=instance, rescue_password="password", rescue_image_ref=image_ref, clean_shutdown=True) mock_image_get.assert_called_with(ctxt, image_ref) mock_rescue.assert_called_with(ctxt, instance, [], test.MatchType(objects.ImageMeta), 'password') self.compute.terminate_instance(ctxt, instance, [], []) @mock.patch.object(image_api.API, "get") @mock.patch.object(nova.virt.fake.FakeDriver, "rescue") def test_rescue_with_base_image_when_image_not_specified(self, mock_rescue, mock_image_get): image_ref = "image-ref" system_meta = {"image_base_image_ref": image_ref} rescue_image_meta = {} params = {"task_state": task_states.RESCUING, "system_metadata": system_meta} instance = self._create_fake_instance_obj(params=params) ctxt = context.get_admin_context() mock_context = mock.Mock() mock_context.elevated.return_value = ctxt mock_image_get.return_value = rescue_image_meta self.compute.rescue_instance(mock_context, instance=instance, rescue_password="password", rescue_image_ref=None, clean_shutdown=True) mock_image_get.assert_called_with(ctxt, image_ref) mock_rescue.assert_called_with(ctxt, instance, [], test.MatchType(objects.ImageMeta), 'password') self.compute.terminate_instance(self.context, instance, [], []) def test_power_on(self): # Ensure instance can be powered on. called = {'power_on': False} def fake_driver_power_on(self, context, instance, network_info, block_device_info): called['power_on'] = True self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on', fake_driver_power_on) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) extra = ['system_metadata', 'metadata'] inst_obj = objects.Instance.get_by_uuid(self.context, instance['uuid'], expected_attrs=extra) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) self.assertTrue(called['power_on']) self.compute.terminate_instance(self.context, inst_obj, [], []) def test_power_off(self): # Ensure instance can be powered off. 
called = {'power_off': False} def fake_driver_power_off(self, instance, shutdown_timeout, shutdown_attempts): called['power_off'] = True self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off', fake_driver_power_off) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) extra = ['system_metadata', 'metadata'] inst_obj = objects.Instance.get_by_uuid(self.context, instance['uuid'], expected_attrs=extra) inst_obj.task_state = task_states.POWERING_OFF inst_obj.save() self.compute.stop_instance(self.context, instance=inst_obj, clean_shutdown=True) self.assertTrue(called['power_off']) self.compute.terminate_instance(self.context, inst_obj, [], []) def test_pause(self): # Ensure instance can be paused and unpaused. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.task_state = task_states.PAUSING instance.save() fake_notifier.NOTIFICATIONS = [] self.compute.pause_instance(self.context, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.pause.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.pause.end') instance.task_state = task_states.UNPAUSING instance.save() fake_notifier.NOTIFICATIONS = [] self.compute.unpause_instance(self.context, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.unpause.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.unpause.end') self.compute.terminate_instance(self.context, instance, [], []) def test_suspend(self): # ensure instance can be suspended and resumed. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.task_state = task_states.SUSPENDING instance.save() self.compute.suspend_instance(self.context, instance) instance.task_state = task_states.RESUMING instance.save() self.compute.resume_instance(self.context, instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6) msg = fake_notifier.NOTIFICATIONS[2] self.assertEqual(msg.event_type, 'compute.instance.suspend.start') msg = fake_notifier.NOTIFICATIONS[3] self.assertEqual(msg.event_type, 'compute.instance.suspend.end') self.compute.terminate_instance(self.context, instance, [], []) def test_suspend_error(self): # Ensure vm_state is ERROR when suspend error occurs. 
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) with mock.patch.object(self.compute.driver, 'suspend', side_effect=test.TestingException): self.assertRaises(test.TestingException, self.compute.suspend_instance, self.context, instance=instance) instance = db.instance_get_by_uuid(self.context, instance.uuid) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_suspend_not_implemented(self): # Ensure expected exception is raised and the vm_state of instance # restore to original value if suspend is not implemented by driver instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) with mock.patch.object(self.compute.driver, 'suspend', side_effect=NotImplementedError('suspend test')): self.assertRaises(NotImplementedError, self.compute.suspend_instance, self.context, instance=instance) instance = db.instance_get_by_uuid(self.context, instance.uuid) self.assertEqual(vm_states.ACTIVE, instance.vm_state) def test_suspend_rescued(self): # ensure rescued instance can be suspended and resumed. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.vm_state = vm_states.RESCUED instance.task_state = task_states.SUSPENDING instance.save() self.compute.suspend_instance(self.context, instance) self.assertEqual(instance.vm_state, vm_states.SUSPENDED) instance.task_state = task_states.RESUMING instance.save() self.compute.resume_instance(self.context, instance) self.assertEqual(instance.vm_state, vm_states.RESCUED) self.compute.terminate_instance(self.context, instance, [], []) def test_resume_notifications(self): # ensure instance can be suspended and resumed. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.task_state = task_states.SUSPENDING instance.save() self.compute.suspend_instance(self.context, instance) instance.task_state = task_states.RESUMING instance.save() self.compute.resume_instance(self.context, instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6) msg = fake_notifier.NOTIFICATIONS[4] self.assertEqual(msg.event_type, 'compute.instance.resume.start') msg = fake_notifier.NOTIFICATIONS[5] self.assertEqual(msg.event_type, 'compute.instance.resume.end') self.compute.terminate_instance(self.context, instance, [], []) def test_resume_no_old_state(self): # ensure a suspended instance with no old_vm_state is resumed to the # ACTIVE state instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.vm_state = vm_states.SUSPENDED instance.task_state = task_states.RESUMING instance.save() self.compute.resume_instance(self.context, instance) self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.compute.terminate_instance(self.context, instance, [], []) def test_resume_error(self): # Ensure vm_state is ERROR when resume error occurs. 
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.task_state = task_states.SUSPENDING instance.save() self.compute.suspend_instance(self.context, instance) instance.task_state = task_states.RESUMING instance.save() with mock.patch.object(self.compute.driver, 'resume', side_effect=test.TestingException): self.assertRaises(test.TestingException, self.compute.resume_instance, self.context, instance) instance = db.instance_get_by_uuid(self.context, instance.uuid) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_rebuild(self): # Ensure instance can be rebuilt. instance = self._create_fake_instance_obj() image_ref = instance['image_ref'] sys_metadata = db.instance_system_metadata_get(self.context, instance['uuid']) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) self.compute.rebuild_instance(self.context, instance, image_ref, image_ref, injected_files=[], new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[], recreate=False, on_shared_storage=False) self.compute.terminate_instance(self.context, instance, [], []) def test_rebuild_driver(self): # Make sure virt drivers can override default rebuild called = {'rebuild': False} def fake(**kwargs): instance = kwargs['instance'] instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING instance.save(expected_task_state=[task_states.REBUILDING]) instance.task_state = task_states.REBUILD_SPAWNING instance.save( expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING]) called['rebuild'] = True self.stubs.Set(self.compute.driver, 'rebuild', fake) instance = self._create_fake_instance_obj() image_ref = instance['image_ref'] sys_metadata = db.instance_system_metadata_get(self.context, instance['uuid']) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) self.compute.rebuild_instance(self.context, instance, image_ref, image_ref, injected_files=[], new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[], recreate=False, on_shared_storage=False) self.assertTrue(called['rebuild']) self.compute.terminate_instance(self.context, instance, [], []) @mock.patch('nova.compute.manager.ComputeManager._detach_volume') def test_rebuild_driver_with_volumes(self, mock_detach): bdms = block_device_obj.block_device_make_list(self.context, [fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'volume_id': uuids.volume_id, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vda', 'connection_info': '{"driver_volume_type": "rbd"}', 'source_type': 'image', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'boot_index': 0 })]) # Make sure virt drivers can override default rebuild called = {'rebuild': False} def fake(**kwargs): instance = kwargs['instance'] instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING instance.save(expected_task_state=[task_states.REBUILDING]) instance.task_state = task_states.REBUILD_SPAWNING instance.save( expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING]) called['rebuild'] = True func = kwargs['detach_block_devices'] # Have the fake driver call the function to detach block devices func(self.context, bdms) # Verify volumes to be detached without destroying 
mock_detach.assert_called_once_with(self.context, bdms[0].volume_id, instance, destroy_bdm=False) self.stubs.Set(self.compute.driver, 'rebuild', fake) instance = self._create_fake_instance_obj() image_ref = instance['image_ref'] sys_metadata = db.instance_system_metadata_get(self.context, instance['uuid']) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) self.compute.rebuild_instance(self.context, instance, image_ref, image_ref, injected_files=[], new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=bdms, recreate=False, on_shared_storage=False) self.assertTrue(called['rebuild']) self.compute.terminate_instance(self.context, instance, [], []) def test_rebuild_no_image(self): # Ensure instance can be rebuilt when started with no image. params = {'image_ref': ''} instance = self._create_fake_instance_obj(params) sys_metadata = db.instance_system_metadata_get(self.context, instance['uuid']) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) self.compute.rebuild_instance(self.context, instance, '', '', injected_files=[], new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[], recreate=False, on_shared_storage=False) self.compute.terminate_instance(self.context, instance, [], []) def test_rebuild_launched_at_time(self): # Ensure instance can be rebuilt. old_time = datetime.datetime(2012, 4, 1) cur_time = datetime.datetime(2012, 12, 21, 12, 21) time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time)) instance = self._create_fake_instance_obj() image_ref = instance['image_ref'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) time_fixture.advance_time_delta(cur_time - old_time) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) self.compute.rebuild_instance(self.context, instance, image_ref, image_ref, injected_files=[], new_pass="new_password", orig_sys_metadata={}, bdms=[], recreate=False, on_shared_storage=False) instance.refresh() self.assertEqual(cur_time, instance['launched_at'].replace(tzinfo=None)) self.compute.terminate_instance(self.context, instance, [], []) def test_rebuild_with_injected_files(self): # Ensure instance can be rebuilt with injected files. 
injected_files = [ (b'/a/b/c', base64.b64encode(b'foobarbaz')), ] self.decoded_files = [ (b'/a/b/c', b'foobarbaz'), ] def _spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info): self.assertEqual(self.decoded_files, injected_files) self.stubs.Set(self.compute.driver, 'spawn', _spawn) instance = self._create_fake_instance_obj() image_ref = instance['image_ref'] sys_metadata = db.instance_system_metadata_get(self.context, instance['uuid']) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.REBUILDING}) self.compute.rebuild_instance(self.context, instance, image_ref, image_ref, injected_files=injected_files, new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[], recreate=False, on_shared_storage=False) self.compute.terminate_instance(self.context, instance, [], []) def _test_reboot(self, soft, test_delete=False, test_unrescue=False, fail_reboot=False, fail_running=False): reboot_type = soft and 'SOFT' or 'HARD' task_pending = (soft and task_states.REBOOT_PENDING or task_states.REBOOT_PENDING_HARD) task_started = (soft and task_states.REBOOT_STARTED or task_states.REBOOT_STARTED_HARD) expected_task = (soft and task_states.REBOOTING or task_states.REBOOTING_HARD) expected_tasks = (soft and (task_states.REBOOTING, task_states.REBOOT_PENDING, task_states.REBOOT_STARTED) or (task_states.REBOOTING_HARD, task_states.REBOOT_PENDING_HARD, task_states.REBOOT_STARTED_HARD)) # This is a true unit test, so we don't need the network stubs. fake_network.unset_stub_network_methods(self) self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute, '_instance_update') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.driver, 'reboot') # FIXME(comstud): I don't feel like the context needs to # be elevated at all. Hopefully remove elevated from # reboot_instance and remove the stub here in a future patch. # econtext would just become self.context below then. econtext = self.context.elevated() db_instance = fake_instance.fake_db_instance( **dict(uuid=uuids.db_instance, power_state=power_state.NOSTATE, vm_state=vm_states.ACTIVE, task_state=expected_task, launched_at=timeutils.utcnow())) instance = objects.Instance._from_db_object(econtext, objects.Instance(), db_instance) updated_dbinstance1 = fake_instance.fake_db_instance( **dict(uuid=uuids.db_instance_1, power_state=10003, vm_state=vm_states.ACTIVE, task_state=expected_task, instance_type=flavors.get_default_flavor(), launched_at=timeutils.utcnow())) updated_dbinstance2 = fake_instance.fake_db_instance( **dict(uuid=uuids.db_instance_2, power_state=10003, vm_state=vm_states.ACTIVE, instance_type=flavors.get_default_flavor(), task_state=expected_task, launched_at=timeutils.utcnow())) if test_unrescue: instance.vm_state = vm_states.RESCUED instance.obj_reset_changes() fake_nw_model = network_model.NetworkInfo() fake_block_dev_info = 'fake_block_dev_info' fake_power_state1 = 10001 fake_power_state2 = power_state.RUNNING fake_power_state3 = 10002 # Beginning of calls we expect. 
self.mox.StubOutWithMock(self.context, 'elevated') self.context.elevated().AndReturn(econtext) self.compute._get_instance_block_device_info( econtext, instance).AndReturn(fake_block_dev_info) self.compute.network_api.get_instance_nw_info( econtext, instance).AndReturn(fake_nw_model) self.compute._notify_about_instance_usage(econtext, instance, 'reboot.start') self.compute._get_power_state(econtext, instance).AndReturn(fake_power_state1) db.instance_update_and_get_original(econtext, instance['uuid'], {'task_state': task_pending, 'expected_task_state': expected_tasks, 'power_state': fake_power_state1}, columns_to_join=['system_metadata', 'extra', 'extra.flavor'] ).AndReturn((None, updated_dbinstance1)) expected_nw_info = fake_nw_model db.instance_update_and_get_original(econtext, updated_dbinstance1['uuid'], {'task_state': task_started, 'expected_task_state': task_pending}, columns_to_join=['system_metadata'] ).AndReturn((None, updated_dbinstance1)) # Annoying. driver.reboot is wrapped in a try/except, and # doesn't re-raise. It eats exception generated by mox if # this is called with the wrong args, so we have to hack # around it. reboot_call_info = {} expected_call_info = { 'args': (econtext, instance, expected_nw_info, reboot_type), 'kwargs': {'block_device_info': fake_block_dev_info}} fault = exception.InstanceNotFound(instance_id='instance-0000') def fake_reboot(*args, **kwargs): reboot_call_info['args'] = args reboot_call_info['kwargs'] = kwargs # NOTE(sirp): Since `bad_volumes_callback` is a function defined # within `reboot_instance`, we don't have access to its value and # can't stub it out, thus we skip that comparison. kwargs.pop('bad_volumes_callback') if fail_reboot: raise fault self.stubs.Set(self.compute.driver, 'reboot', fake_reboot) # Power state should be updated again if not fail_reboot or fail_running: new_power_state = fake_power_state2 self.compute._get_power_state(econtext, instance).AndReturn(fake_power_state2) else: new_power_state = fake_power_state3 self.compute._get_power_state(econtext, instance).AndReturn(fake_power_state3) if test_delete: fault = exception.InstanceNotFound( instance_id=instance['uuid']) db.instance_update_and_get_original( econtext, updated_dbinstance1['uuid'], {'power_state': new_power_state, 'task_state': None, 'vm_state': vm_states.ACTIVE}, columns_to_join=['system_metadata'], ).AndRaise(fault) self.compute._notify_about_instance_usage( econtext, instance, 'reboot.end') elif fail_reboot and not fail_running: db.instance_update_and_get_original( econtext, updated_dbinstance1['uuid'], {'vm_state': vm_states.ERROR}, columns_to_join=['system_metadata'], ).AndRaise(fault) else: db.instance_update_and_get_original( econtext, updated_dbinstance1['uuid'], {'power_state': new_power_state, 'task_state': None, 'vm_state': vm_states.ACTIVE}, columns_to_join=['system_metadata'], ).AndReturn((None, updated_dbinstance2)) if fail_running: self.compute._notify_about_instance_usage(econtext, instance, 'reboot.error', fault=fault) self.compute._notify_about_instance_usage( econtext, instance, 'reboot.end') self.mox.ReplayAll() if not fail_reboot or fail_running: self.compute.reboot_instance(self.context, instance=instance, block_device_info=None, reboot_type=reboot_type) else: self.assertRaises(exception.InstanceNotFound, self.compute.reboot_instance, self.context, instance=instance, block_device_info=None, reboot_type=reboot_type) self.assertEqual(expected_call_info, reboot_call_info) def test_reboot_soft(self): self._test_reboot(True) def 
test_reboot_soft_and_delete(self): self._test_reboot(True, True) def test_reboot_soft_and_rescued(self): self._test_reboot(True, False, True) def test_reboot_soft_and_delete_and_rescued(self): self._test_reboot(True, True, True) def test_reboot_hard(self): self._test_reboot(False) def test_reboot_hard_and_delete(self): self._test_reboot(False, True) def test_reboot_hard_and_rescued(self): self._test_reboot(False, False, True) def test_reboot_hard_and_delete_and_rescued(self): self._test_reboot(False, True, True) @mock.patch.object(jsonutils, 'to_primitive') def test_reboot_fail(self, mock_to_primitive): self._test_reboot(False, fail_reboot=True) def test_reboot_fail_running(self): self._test_reboot(False, fail_reboot=True, fail_running=True) def test_get_instance_block_device_info_source_image(self): bdms = block_device_obj.block_device_make_list(self.context, [fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'volume_id': uuids.volume_id, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vda', 'connection_info': '{"driver_volume_type": "rbd"}', 'source_type': 'image', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'boot_index': 0 })]) with (mock.patch.object( objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=bdms) ) as mock_get_by_instance: block_device_info = ( self.compute._get_instance_block_device_info( self.context, self._create_fake_instance_obj()) ) expected = { 'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': [{ 'connection_info': { 'driver_volume_type': 'rbd' }, 'mount_device': '/dev/vda', 'delete_on_termination': False }] } self.assertTrue(mock_get_by_instance.called) self.assertEqual(block_device_info, expected) def test_get_instance_block_device_info_passed_bdms(self): bdms = block_device_obj.block_device_make_list(self.context, [fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'volume_id': uuids.volume_id, 'device_name': '/dev/vdd', 'connection_info': '{"driver_volume_type": "rbd"}', 'source_type': 'volume', 'destination_type': 'volume'}) ]) with (mock.patch.object( objects.BlockDeviceMappingList, 'get_by_instance_uuid')) as mock_get_by_instance: block_device_info = ( self.compute._get_instance_block_device_info( self.context, self._create_fake_instance_obj(), bdms=bdms) ) expected = { 'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': [{ 'connection_info': { 'driver_volume_type': 'rbd' }, 'mount_device': '/dev/vdd', 'delete_on_termination': False }] } self.assertFalse(mock_get_by_instance.called) self.assertEqual(block_device_info, expected) def test_get_instance_block_device_info_swap_and_ephemerals(self): instance = self._create_fake_instance_obj() ephemeral0 = fake_block_device.FakeDbBlockDeviceDict({ 'id': 1, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': None, 'volume_size': 1, 'boot_index': -1 }) ephemeral1 = fake_block_device.FakeDbBlockDeviceDict({ 'id': 2, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdc', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': None, 'volume_size': 2, 'boot_index': -1 }) swap = fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vdd', 'source_type': 'blank', 
'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': 'swap', 'volume_size': 1, 'boot_index': -1 }) bdms = block_device_obj.block_device_make_list(self.context, [swap, ephemeral0, ephemeral1]) with ( mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=bdms) ) as mock_get_by_instance_uuid: expected_block_device_info = { 'swap': {'device_name': '/dev/vdd', 'swap_size': 1}, 'ephemerals': [{'device_name': '/dev/vdb', 'num': 0, 'size': 1, 'virtual_name': 'ephemeral0'}, {'device_name': '/dev/vdc', 'num': 1, 'size': 2, 'virtual_name': 'ephemeral1'}], 'block_device_mapping': [], 'root_device_name': None } block_device_info = ( self.compute._get_instance_block_device_info( self.context, instance) ) mock_get_by_instance_uuid.assert_called_once_with(self.context, instance['uuid']) self.assertEqual(expected_block_device_info, block_device_info) def test_inject_network_info(self): # Ensure we can inject network info. called = {'inject': False} def fake_driver_inject_network(self, instance, network_info): called['inject'] = True self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info', fake_driver_inject_network) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.inject_network_info(self.context, instance=instance) self.assertTrue(called['inject']) self.compute.terminate_instance(self.context, instance, [], []) def test_reset_network(self): # Ensure we can reset networking on an instance. called = {'count': 0} def fake_driver_reset_network(self, instance): called['count'] += 1 self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network', fake_driver_reset_network) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.reset_network(self.context, instance) self.assertEqual(called['count'], 1) self.compute.terminate_instance(self.context, instance, [], []) def _get_snapshotting_instance(self): # Ensure instance can be snapshotted. 
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING instance.save() return instance def test_snapshot(self): inst_obj = self._get_snapshotting_instance() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def test_snapshot_no_image(self): inst_obj = self._get_snapshotting_instance() inst_obj.image_ref = '' inst_obj.save() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def _test_snapshot_fails(self, raise_during_cleanup, method, expected_state=True): def fake_snapshot(*args, **kwargs): raise test.TestingException() self.fake_image_delete_called = False def fake_delete(self_, context, image_id): self.fake_image_delete_called = True if raise_during_cleanup: raise Exception() self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot) fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete) inst_obj = self._get_snapshotting_instance() if method == 'snapshot': self.assertRaises(test.TestingException, self.compute.snapshot_instance, self.context, image_id='fakesnap', instance=inst_obj) else: self.assertRaises(test.TestingException, self.compute.backup_instance, self.context, image_id='fakesnap', instance=inst_obj, backup_type='fake', rotation=1) self.assertEqual(expected_state, self.fake_image_delete_called) self._assert_state({'task_state': None}) @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups') def test_backup_fails(self, mock_rotate): self._test_snapshot_fails(False, 'backup') @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups') def test_backup_fails_cleanup_ignores_exception(self, mock_rotate): self._test_snapshot_fails(True, 'backup') @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups') @mock.patch.object(nova.compute.manager.ComputeManager, '_do_snapshot_instance') def test_backup_fails_rotate_backup(self, mock_snap, mock_rotate): mock_rotate.side_effect = test.TestingException() self._test_snapshot_fails(True, 'backup', False) def test_snapshot_fails(self): self._test_snapshot_fails(False, 'snapshot') def test_snapshot_fails_cleanup_ignores_exception(self): self._test_snapshot_fails(True, 'snapshot') def _test_snapshot_deletes_image_on_failure(self, status, exc): self.fake_image_delete_called = False def fake_show(self_, context, image_id, **kwargs): self.assertEqual('fakesnap', image_id) image = {'id': image_id, 'status': status} return image self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) def fake_delete(self_, context, image_id): self.fake_image_delete_called = True self.assertEqual('fakesnap', image_id) self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete) def fake_snapshot(*args, **kwargs): raise exc self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot) fake_image.stub_out_image_service(self) inst_obj = self._get_snapshotting_instance() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def test_snapshot_fails_with_glance_error(self): image_not_found = exception.ImageNotFound(image_id='fakesnap') self._test_snapshot_deletes_image_on_failure('error', image_not_found) self.assertFalse(self.fake_image_delete_called) self._assert_state({'task_state': None}) def test_snapshot_fails_with_task_state_error(self): deleting_state_error = exception.UnexpectedDeletingTaskStateError( 
instance_uuid=uuids.instance, expected={'task_state': task_states.IMAGE_SNAPSHOT}, actual={'task_state': task_states.DELETING}) self._test_snapshot_deletes_image_on_failure( 'error', deleting_state_error) self.assertTrue(self.fake_image_delete_called) self._test_snapshot_deletes_image_on_failure( 'active', deleting_state_error) self.assertFalse(self.fake_image_delete_called) def test_snapshot_fails_with_instance_not_found(self): instance_not_found = exception.InstanceNotFound(instance_id='uuid') self._test_snapshot_deletes_image_on_failure( 'error', instance_not_found) self.assertTrue(self.fake_image_delete_called) self._test_snapshot_deletes_image_on_failure( 'active', instance_not_found) self.assertFalse(self.fake_image_delete_called) def test_snapshot_handles_cases_when_instance_is_deleted(self): inst_obj = self._get_snapshotting_instance() inst_obj.task_state = task_states.DELETING inst_obj.save() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def test_snapshot_handles_cases_when_instance_is_not_found(self): inst_obj = self._get_snapshotting_instance() inst_obj2 = objects.Instance.get_by_uuid(self.context, inst_obj.uuid) inst_obj2.destroy() self.compute.snapshot_instance(self.context, image_id='fakesnap', instance=inst_obj) def _assert_state(self, state_dict): """Assert state of VM is equal to state passed as parameter.""" instances = db.instance_get_all(self.context) self.assertEqual(len(instances), 1) if 'vm_state' in state_dict: self.assertEqual(state_dict['vm_state'], instances[0]['vm_state']) if 'task_state' in state_dict: self.assertEqual(state_dict['task_state'], instances[0]['task_state']) if 'power_state' in state_dict: self.assertEqual(state_dict['power_state'], instances[0]['power_state']) def test_console_output(self): # Make sure we can get console output from instance. instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) output = self.compute.get_console_output(self.context, instance=instance, tail_length=None) self.assertEqual(output, b'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE') self.compute.terminate_instance(self.context, instance, [], []) def test_console_output_bytes(self): # Make sure we can get console output from instance. instance = self._create_fake_instance_obj() with mock.patch.object(self.compute, 'get_console_output') as mock_console_output: mock_console_output.return_value = b'Hello.' output = self.compute.get_console_output(self.context, instance=instance, tail_length=None) self.assertEqual(output, b'Hello.') self.compute.terminate_instance(self.context, instance, [], []) def test_console_output_tail(self): # Make sure we can get console output from instance. 
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) output = self.compute.get_console_output(self.context, instance=instance, tail_length=2) self.assertEqual(output, b'ANOTHER\nLAST LINE') self.compute.terminate_instance(self.context, instance, [], []) def test_console_output_not_implemented(self): def fake_not_implemented(*args, **kwargs): raise NotImplementedError() self.stubs.Set(self.compute.driver, 'get_console_output', fake_not_implemented) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_console_output, self.context, instance, 0) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.get_console_output, self.context, instance, 0) self.compute.terminate_instance(self.context, instance, [], []) def test_console_output_instance_not_found(self): def fake_not_found(*args, **kwargs): raise exception.InstanceNotFound(instance_id='fake-instance') self.stubs.Set(self.compute.driver, 'get_console_output', fake_not_found) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_console_output, self.context, instance, 0) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotFound, self.compute.get_console_output, self.context, instance, 0) self.compute.terminate_instance(self.context, instance, [], []) def test_novnc_vnc_console(self): # Make sure we can a vnc console for an instance. self.flags(enabled=True, group='vnc') self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Try with the full instance console = self.compute.get_vnc_console(self.context, 'novnc', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_validate_console_port_vnc(self): self.flags(enabled=True, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleVNC(host="fake_host", port=5900) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, instance=instance, port=5900, console_type="novnc")) def test_validate_console_port_spice(self): self.flags(enabled=True, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88) self.stubs.Set(self.compute.driver, "get_spice_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, instance=instance, port=5900, console_type="spice-html5")) def test_validate_console_port_rdp(self): self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleRDP(host="fake_host", port=5900) self.stubs.Set(self.compute.driver, "get_rdp_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, 
            instance=instance, port=5900, console_type="rdp-html5"))

    def test_validate_console_port_mks(self):
        self.flags(enabled=True, group='mks')
        instance = self._create_fake_instance_obj()
        with mock.patch.object(
                self.compute.driver, 'get_mks_console') as mock_getmks:
            mock_getmks.return_value = ctype.ConsoleMKS(host="fake_host",
                                                        port=5900)
            result = self.compute.validate_console_port(
                context=self.context, instance=instance, port=5900,
                console_type="webmks")
            self.assertTrue(result)

    def test_validate_console_port_wrong_port(self):
        self.flags(enabled=True, group='vnc')
        self.flags(enabled=True, group='spice')
        instance = self._create_fake_instance_obj()

        def fake_driver_get_console(*args, **kwargs):
            return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)

        self.stubs.Set(self.compute.driver, "get_vnc_console",
                       fake_driver_get_console)
        self.assertFalse(self.compute.validate_console_port(
            context=self.context, instance=instance, port="wrongport",
            console_type="spice-html5"))

    def test_xvpvnc_vnc_console(self):
        # Make sure we can get a vnc console for an instance.
        self.flags(enabled=True, group='vnc')
        self.flags(enabled=False, group='spice')
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        console = self.compute.get_vnc_console(self.context, 'xvpvnc',
                                               instance=instance)
        self.assertTrue(console)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_invalid_vnc_console_type(self):
        # Raise useful error if console type is an unrecognised string.
        self.flags(enabled=True, group='vnc')
        self.flags(enabled=False, group='spice')
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_vnc_console,
                          self.context, 'invalid', instance=instance)
        self.compute = utils.ExceptionHelper(self.compute)
        self.assertRaises(exception.ConsoleTypeInvalid,
                          self.compute.get_vnc_console,
                          self.context, 'invalid', instance=instance)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_missing_vnc_console_type(self):
        # Raise useful error if console type is None.
        self.flags(enabled=True, group='vnc')
        self.flags(enabled=False, group='spice')
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_vnc_console,
                          self.context, None, instance=instance)
        self.compute = utils.ExceptionHelper(self.compute)
        self.assertRaises(exception.ConsoleTypeInvalid,
                          self.compute.get_vnc_console,
                          self.context, None, instance=instance)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_get_vnc_console_not_implemented(self):
        self.stubs.Set(self.compute.driver, 'get_vnc_console',
                       fake_not_implemented)
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        self.assertRaises(messaging.ExpectedException,
                          self.compute.get_vnc_console,
                          self.context, 'novnc', instance=instance)
        self.compute = utils.ExceptionHelper(self.compute)
        self.assertRaises(NotImplementedError,
                          self.compute.get_vnc_console,
                          self.context, 'novnc', instance=instance)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_spicehtml5_spice_console(self):
        # Make sure we can get a spice console for an instance.
self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Try with the full instance console = self.compute.get_spice_console(self.context, 'spice-html5', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_invalid_spice_console_type(self): # Raise useful error if console type is an unrecognised string self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_spice_console, self.context, 'invalid', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_spice_console, self.context, 'invalid', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_get_spice_console_not_implemented(self): self.stubs.Set(self.compute.driver, 'get_spice_console', fake_not_implemented) self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_missing_spice_console_type(self): # Raise useful error is console type is None self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_spice_console, self.context, None, instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_spice_console, self.context, None, instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_rdphtml5_rdp_console(self): # Make sure we can a rdp console for an instance. 
self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Try with the full instance console = self.compute.get_rdp_console(self.context, 'rdp-html5', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_invalid_rdp_console_type(self): # Raise useful error if console type is an unrecognised string self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_rdp_console, self.context, 'invalid', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_rdp_console, self.context, 'invalid', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_missing_rdp_console_type(self): # Raise useful error is console type is None self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_rdp_console, self.context, None, instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_rdp_console, self.context, None, instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_vnc_console_instance_not_ready(self): self.flags(enabled=True, group='vnc') self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) def test_spice_console_instance_not_ready(self): self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_spice_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) def test_rdp_console_instance_not_ready(self): self.flags(enabled=False, group='vnc') self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_rdp_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_rdp_console, self.context, 'rdp-html5', instance=instance) def test_vnc_console_disabled(self): self.flags(enabled=False, group='vnc') 
instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) def test_spice_console_disabled(self): self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) def test_rdp_console_disabled(self): self.flags(enabled=False, group='rdp') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_rdp_console, self.context, 'rdp-html5', instance=instance) def test_diagnostics(self): # Make sure we can get diagnostics for an instance. expected_diagnostic = {'cpu0_time': 17300000000, 'memory': 524288, 'vda_errors': -1, 'vda_read': 262144, 'vda_read_req': 112, 'vda_write': 5778432, 'vda_write_req': 488, 'vnet1_rx': 2070139, 'vnet1_rx_drop': 0, 'vnet1_rx_errors': 0, 'vnet1_rx_packets': 26701, 'vnet1_tx': 140208, 'vnet1_tx_drop': 0, 'vnet1_tx_errors': 0, 'vnet1_tx_packets': 662, } instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) diagnostics = self.compute.get_diagnostics(self.context, instance=instance) self.assertEqual(diagnostics, expected_diagnostic) self.compute.terminate_instance(self.context, instance, [], []) def test_instance_diagnostics(self): # Make sure we can get diagnostics for an instance. 
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) diagnostics = self.compute.get_instance_diagnostics(self.context, instance=instance) expected = {'config_drive': True, 'cpu_details': [{'time': 17300000000}], 'disk_details': [{'errors_count': 0, 'id': 'fake-disk-id', 'read_bytes': 262144, 'read_requests': 112, 'write_bytes': 5778432, 'write_requests': 488}], 'driver': 'fake', 'hypervisor_os': 'fake-os', 'memory_details': {'maximum': 524288, 'used': 0}, 'nic_details': [{'mac_address': '01:23:45:67:89:ab', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 2070139, 'rx_packets': 26701, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 140208, 'tx_packets': 662}], 'state': 'running', 'uptime': 46664, 'version': '1.0'} self.assertEqual(expected, diagnostics) self.compute.terminate_instance(self.context, instance, [], []) def test_add_fixed_ip_usage_notification(self): def dummy(*args, **kwargs): pass self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'inject_network_info', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'reset_network', dummy) instance = self._create_fake_instance_obj() self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0) self.compute.add_fixed_ip_to_instance(self.context, network_id=1, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) self.compute.terminate_instance(self.context, instance, [], []) def test_remove_fixed_ip_usage_notification(self): def dummy(*args, **kwargs): pass self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'inject_network_info', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'reset_network', dummy) instance = self._create_fake_instance_obj() self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0) self.compute.remove_fixed_ip_from_instance(self.context, 1, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) self.compute.terminate_instance(self.context, instance, [], []) def test_run_instance_usage_notification(self, request_spec=None): # Ensure run instance generates appropriate usage notification. request_spec = request_spec or {} instance = self._create_fake_instance_obj() expected_image_name = request_spec.get('image', {}).get('name', '') self.compute.build_and_run_instance(self.context, instance, request_spec=request_spec, filter_properties={}, image={'name': expected_image_name}, block_device_mapping=[]) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) instance.refresh() msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') # The last event is the one with the sugar in it. 
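# Illustrative aside: the usage-notification tests here rely on fake_notifier,
# which collects every emitted notification in a module-level list so the test
# can assert on event ordering and payload contents afterwards.  A minimal
# sketch of that capture-and-assert idea follows; FakeNotifier and
# emit_create_events are illustrative stand-ins, not the real fake_notifier
# API.
NOTIFICATIONS = []


class FakeNotifier(object):
    """Collect notifications instead of sending them on a message bus."""

    def notify(self, event_type, payload):
        NOTIFICATIONS.append({'event_type': event_type, 'payload': payload})


def emit_create_events(notifier, instance_uuid):
    # A producer emits a .start event, does the work, then emits .end.
    notifier.notify('compute.instance.create.start', {'uuid': instance_uuid})
    notifier.notify('compute.instance.create.end', {'uuid': instance_uuid})


if __name__ == '__main__':
    emit_create_events(FakeNotifier(), 'fake-uuid')
    # The first event is the .start one; the last one carries the payload of
    # interest, mirroring the assertions in the surrounding test.
    assert NOTIFICATIONS[0]['event_type'] == 'compute.instance.create.start'
    assert NOTIFICATIONS[-1]['event_type'] == 'compute.instance.create.end'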
msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.create.end') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) self.assertEqual(payload['state'], 'active') self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) self.assertIn('fixed_ips', payload) self.assertTrue(payload['launched_at']) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) self.assertEqual('Success', payload['message']) self.compute.terminate_instance(self.context, instance, [], []) def test_run_instance_image_usage_notification(self): request_spec = {'image': {'name': 'fake_name', 'key': 'value'}} self.test_run_instance_usage_notification(request_spec=request_spec) def test_run_instance_usage_notification_volume_meta(self): # Volume's image metadata won't contain the image name request_spec = {'image': {'key': 'value'}} self.test_run_instance_usage_notification(request_spec=request_spec) def test_run_instance_end_notification_on_abort(self): # Test that an error notif is sent if the build is aborted instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] def build_inst_abort(*args, **kwargs): raise exception.BuildAbortException(reason="already deleted", instance_uuid=instance_uuid) self.stubs.Set(self.compute.driver, 'spawn', build_inst_abort) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.event_type, 'compute.instance.create.error') self.assertEqual('ERROR', msg.priority) payload = msg.payload message = payload['message'] self.assertNotEqual(-1, message.find("already deleted")) def test_run_instance_error_notification_on_reschedule(self): # Test that error notif is sent if the build got rescheduled instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] def build_inst_fail(*args, **kwargs): raise exception.RescheduledException(instance_uuid=instance_uuid, reason="something bad happened") self.stubs.Set(self.compute.driver, 'spawn', build_inst_fail) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.event_type, 'compute.instance.create.error') self.assertEqual('ERROR', msg.priority) payload = msg.payload message = payload['message'] self.assertNotEqual(-1, message.find("something bad happened")) def test_run_instance_error_notification_on_failure(self): # Test that error notif is sent if build fails hard instance = self._create_fake_instance_obj() def build_inst_fail(*args, **kwargs): raise test.TestingException("i'm dying") 
self.stubs.Set(self.compute.driver, 'spawn', build_inst_fail) self.compute.build_and_run_instance( self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.event_type, 'compute.instance.create.error') self.assertEqual('ERROR', msg.priority) payload = msg.payload message = payload['message'] self.assertNotEqual(-1, message.find("i'm dying")) def test_terminate_usage_notification(self): # Ensure terminate_instance generates correct usage notification. old_time = datetime.datetime(2012, 4, 1) cur_time = datetime.datetime(2012, 12, 21, 12, 21) time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time)) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) fake_notifier.NOTIFICATIONS = [] time_fixture.advance_time_delta(cur_time - old_time) self.compute.terminate_instance(self.context, instance, [], []) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.delete.start') msg1 = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start') msg1 = fake_notifier.NOTIFICATIONS[2] self.assertEqual(msg1.event_type, 'compute.instance.shutdown.end') msg1 = fake_notifier.NOTIFICATIONS[3] self.assertEqual(msg1.event_type, 'compute.instance.delete.end') payload = msg1.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) self.assertIn('terminated_at', payload) self.assertIn('deleted_at', payload) self.assertEqual(payload['terminated_at'], utils.strtime(cur_time)) self.assertEqual(payload['deleted_at'], utils.strtime(cur_time)) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) def test_run_instance_queries_macs(self): # run_instance should ask the driver for node mac addresses and pass # that to the network_api in use. 
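# Illustrative aside: test_run_instance_queries_macs (whose body follows)
# verifies a data-flow property -- whatever macs_for_instance() returns on the
# driver must be forwarded to network_api.allocate_for_instance().  With
# unittest.mock the same check can be written by giving one mock a return
# value and asserting the other mock was called with it; Manager below is a
# hypothetical stand-in, not Nova code.
from unittest import mock


class Manager(object):
    def __init__(self, driver, network_api):
        self.driver = driver
        self.network_api = network_api

    def build_networks(self, instance):
        macs = self.driver.macs_for_instance(instance)
        return self.network_api.allocate_for_instance(instance, macs=macs)


if __name__ == '__main__':
    driver = mock.Mock()
    driver.macs_for_instance.return_value = {'01:23:45:67:89:ab'}
    network_api = mock.Mock()
    Manager(driver, network_api).build_networks('fake-instance')
    # The macs returned by the driver must be passed through unchanged.
    network_api.allocate_for_instance.assert_called_once_with(
        'fake-instance', macs={'01:23:45:67:89:ab'})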
fake_network.unset_stub_network_methods(self) instance = self._create_fake_instance_obj() macs = set(['01:23:45:67:89:ab']) self.mox.StubOutWithMock(self.compute.network_api, "allocate_for_instance") self.compute.network_api.allocate_for_instance( self.context, instance, vpn=False, requested_networks=None, macs=macs, security_groups=[], dhcp_options=None, bind_host_id=self.compute.host).AndReturn( fake_network.fake_get_instance_nw_info(self, 1, 1)) self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance") self.compute.driver.macs_for_instance( mox.IsA(instance_obj.Instance)).AndReturn(macs) self.mox.ReplayAll() self.compute._build_networks_for_instance(self.context, instance, requested_networks=None, security_groups=None) def _create_server_group(self, policies, instance_host): group_instance = self._create_fake_instance_obj( params=dict(host=instance_host)) instance_group = objects.InstanceGroup(self.context) instance_group.user_id = self.user_id instance_group.project_id = self.project_id instance_group.name = 'messi' instance_group.uuid = str(uuid.uuid4()) instance_group.members = [group_instance.uuid] instance_group.policies = policies fake_notifier.NOTIFICATIONS = [] instance_group.create() self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(instance_group.name, msg.payload['name']) self.assertEqual(instance_group.members, msg.payload['members']) self.assertEqual(instance_group.policies, msg.payload['policies']) self.assertEqual(instance_group.project_id, msg.payload['project_id']) self.assertEqual(instance_group.uuid, msg.payload['uuid']) self.assertEqual('servergroup.create', msg.event_type) return instance_group def test_instance_set_to_error_on_uncaught_exception(self): # Test that instance is set to error state when exception is raised. 
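        # allocate_for_instance is mocked below to raise a RemoteError; the
        # manager has no special handling for it, so the instance is expected
        # to end up in ERROR (deallocate_for_instance is patched out so the
        # cleanup path does not hit a real network API).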
instance = self._create_fake_instance_obj() fake_network.unset_stub_network_methods(self) @mock.patch.object(self.compute.network_api, 'allocate_for_instance', side_effect=messaging.RemoteError()) @mock.patch.object(self.compute.network_api, 'deallocate_for_instance') def _do_test(mock_deallocate, mock_allocate): self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.refresh() self.assertEqual(vm_states.ERROR, instance.vm_state) self.compute.terminate_instance(self.context, instance, [], []) _do_test() def test_delete_instance_keeps_net_on_power_off_fail(self): self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.mox.StubOutWithMock(self.compute, '_deallocate_network') exp = exception.InstancePowerOffFailure(reason='') self.compute.driver.destroy(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(exp) # mox will detect if _deallocate_network gets called unexpectedly self.mox.ReplayAll() instance = self._create_fake_instance_obj() self.assertRaises(exception.InstancePowerOffFailure, self.compute._delete_instance, self.context, instance, [], self.none_quotas) def test_delete_instance_loses_net_on_other_fail(self): self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.mox.StubOutWithMock(self.compute, '_deallocate_network') exp = test.TestingException() self.compute.driver.destroy(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(exp) self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() instance = self._create_fake_instance_obj() self.assertRaises(test.TestingException, self.compute._delete_instance, self.context, instance, [], self.none_quotas) def test_delete_instance_deletes_console_auth_tokens(self): instance = self._create_fake_instance_obj() self.flags(enabled=True, group='vnc') self.tokens_deleted = False def fake_delete_tokens(*args, **kwargs): self.tokens_deleted = True cauth_rpcapi = self.compute.consoleauth_rpcapi self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance', fake_delete_tokens) self.compute._delete_instance(self.context, instance, [], self.none_quotas) self.assertTrue(self.tokens_deleted) def test_delete_instance_deletes_console_auth_tokens_cells(self): instance = self._create_fake_instance_obj() self.flags(enabled=True, group='vnc') self.flags(enable=True, group='cells') self.tokens_deleted = False def fake_delete_tokens(*args, **kwargs): self.tokens_deleted = True cells_rpcapi = self.compute.cells_rpcapi self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens', fake_delete_tokens) self.compute._delete_instance(self.context, instance, [], self.none_quotas) self.assertTrue(self.tokens_deleted) def test_delete_instance_changes_power_state(self): """Test that the power state is NOSTATE after deleting an instance.""" instance = self._create_fake_instance_obj() self.compute._delete_instance(self.context, instance, [], self.none_quotas) self.assertEqual(power_state.NOSTATE, instance.power_state) def test_instance_termination_exception_sets_error(self): """Test that we handle InstanceTerminationFailure which is propagated up from the underlying driver. 
""" instance = self._create_fake_instance_obj() def fake_delete_instance(context, instance, bdms, reservations=None): raise exception.InstanceTerminationFailure(reason='') self.stubs.Set(self.compute, '_delete_instance', fake_delete_instance) self.assertRaises(exception.InstanceTerminationFailure, self.compute.terminate_instance, self.context, instance, [], []) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertEqual(instance['vm_state'], vm_states.ERROR) def test_network_is_deallocated_on_spawn_failure(self): # When a spawn fails the network must be deallocated. instance = self._create_fake_instance_obj() self.mox.StubOutWithMock(self.compute, "_prep_block_device") self.compute._prep_block_device( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(messaging.RemoteError('', '', '')) self.mox.ReplayAll() self.compute.build_and_run_instance( self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.terminate_instance(self.context, instance, [], []) def test_lock(self): # FIXME(comstud): This test is such crap. This is testing # compute API lock functionality in a test class for the compute # manager by running an instance. Hello? We should just have # unit tests in test_compute_api that test the check_instance_lock # decorator and make sure that appropriate compute_api methods # have the decorator. instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) non_admin_context = context.RequestContext(None, None, is_admin=False) def check_task_state(task_state): instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['task_state'], task_state) instance.refresh() # should fail with locked nonadmin context self.compute_api.lock(self.context, instance) self.assertRaises(exception.InstanceIsLocked, self.compute_api.reboot, non_admin_context, instance, 'SOFT') check_task_state(None) # should fail with invalid task state self.compute_api.unlock(self.context, instance) instance.task_state = task_states.REBOOTING instance.save() self.assertRaises(exception.InstanceInvalidState, self.compute_api.reboot, non_admin_context, instance, 'SOFT') check_task_state(task_states.REBOOTING) # should succeed with admin context instance.task_state = None instance.save() self.compute_api.reboot(self.context, instance, 'SOFT') check_task_state(task_states.REBOOTING) self.compute.terminate_instance(self.context, instance, [], []) def _check_locked_by(self, instance_uuid, locked_by): instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['locked'], locked_by is not None) self.assertEqual(instance['locked_by'], locked_by) return instance def test_override_owner_lock(self): # FIXME(comstud): This test is such crap. This is testing # compute API lock functionality in a test class for the compute # manager by running an instance. Hello? We should just have # unit tests in test_compute_api that test the check_instance_lock # decorator and make sure that appropriate compute_api methods # have the decorator. 
admin_context = context.RequestContext('admin-user', 'admin-project', is_admin=True) instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Ensure that an admin can override the owner lock self.compute_api.lock(self.context, instance) self._check_locked_by(instance_uuid, 'owner') self.compute_api.unlock(admin_context, instance) self._check_locked_by(instance_uuid, None) def test_upgrade_owner_lock(self): # FIXME(comstud): This test is such crap. This is testing # compute API lock functionality in a test class for the compute # manager by running an instance. Hello? We should just have # unit tests in test_compute_api that test the check_instance_lock # decorator and make sure that appropriate compute_api methods # have the decorator. admin_context = context.RequestContext('admin-user', 'admin-project', is_admin=True) instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Ensure that an admin can upgrade the lock and that # the owner can no longer unlock self.compute_api.lock(self.context, instance) self.compute_api.lock(admin_context, instance) self._check_locked_by(instance_uuid, 'admin') instance.refresh() self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.unlock, self.context, instance) self._check_locked_by(instance_uuid, 'admin') self.compute_api.unlock(admin_context, instance) self._check_locked_by(instance_uuid, None) def _test_state_revert(self, instance, operation, pre_task_state, kwargs=None, vm_state=None): if kwargs is None: kwargs = {} # The API would have set task_state, so do that here to test # that the state gets reverted on failure db.instance_update(self.context, instance['uuid'], {"task_state": pre_task_state}) orig_elevated = self.context.elevated orig_notify = self.compute._notify_about_instance_usage def _get_an_exception(*args, **kwargs): raise test.TestingException() self.stubs.Set(self.context, 'elevated', _get_an_exception) self.stubs.Set(self.compute, '_notify_about_instance_usage', _get_an_exception) func = getattr(self.compute, operation) self.assertRaises(test.TestingException, func, self.context, instance=instance, **kwargs) # self.context.elevated() is called in tearDown() self.stubs.Set(self.context, 'elevated', orig_elevated) self.stubs.Set(self.compute, '_notify_about_instance_usage', orig_notify) # Fetch the instance's task_state and make sure it reverted to None. instance = db.instance_get_by_uuid(self.context, instance['uuid']) if vm_state: self.assertEqual(instance.vm_state, vm_state) self.assertIsNone(instance["task_state"]) def test_state_revert(self): # ensure that task_state is reverted after a failed operation. 
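        # Each tuple below is (manager method, pre task_state, kwargs,
        # optional expected vm_state); _test_state_revert forces a
        # TestingException inside the call and then checks that task_state
        # was reset to None (and vm_state where given).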
migration = objects.Migration(context=self.context.elevated()) migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13' migration.new_instance_type_id = '1' instance_type = objects.Flavor() actions = [ ("reboot_instance", task_states.REBOOTING, {'block_device_info': [], 'reboot_type': 'SOFT'}), ("stop_instance", task_states.POWERING_OFF, {'clean_shutdown': True}), ("start_instance", task_states.POWERING_ON), ("terminate_instance", task_states.DELETING, {'bdms': [], 'reservations': []}, vm_states.ERROR), ("soft_delete_instance", task_states.SOFT_DELETING, {'reservations': []}), ("restore_instance", task_states.RESTORING), ("rebuild_instance", task_states.REBUILDING, {'orig_image_ref': None, 'image_ref': None, 'injected_files': [], 'new_pass': '', 'orig_sys_metadata': {}, 'bdms': [], 'recreate': False, 'on_shared_storage': False}), ("set_admin_password", task_states.UPDATING_PASSWORD, {'new_pass': None}), ("rescue_instance", task_states.RESCUING, {'rescue_password': None, 'rescue_image_ref': None, 'clean_shutdown': True}), ("unrescue_instance", task_states.UNRESCUING), ("revert_resize", task_states.RESIZE_REVERTING, {'migration': migration, 'reservations': []}), ("prep_resize", task_states.RESIZE_PREP, {'image': {}, 'instance_type': instance_type, 'reservations': [], 'request_spec': {}, 'filter_properties': {}, 'node': None, 'clean_shutdown': True}), ("resize_instance", task_states.RESIZE_PREP, {'migration': migration, 'image': {}, 'reservations': [], 'instance_type': {}, 'clean_shutdown': True}), ("pause_instance", task_states.PAUSING), ("unpause_instance", task_states.UNPAUSING), ("suspend_instance", task_states.SUSPENDING), ("resume_instance", task_states.RESUMING), ] self._stub_out_resize_network_methods() instance = self._create_fake_instance_obj() for operation in actions: if 'revert_resize' in operation: migration.source_compute = 'fake-mini' def fake_migration_save(*args, **kwargs): raise test.TestingException() self.stubs.Set(migration, 'save', fake_migration_save) self._test_state_revert(instance, *operation) def _ensure_quota_reservations_committed(self, instance): """Mock up commit of quota reservations.""" reservations = list('fake_res') self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit') nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations, project_id=instance['project_id'], user_id=instance['user_id']) self.mox.ReplayAll() return reservations def _ensure_quota_reservations_rolledback(self, instance): """Mock up rollback of quota reservations.""" reservations = list('fake_res') self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback') nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations, project_id=instance['project_id'], user_id=instance['user_id']) self.mox.ReplayAll() return reservations def test_quotas_successful_delete(self): instance = self._create_fake_instance_obj() resvs = self._ensure_quota_reservations_committed(instance) self.compute.terminate_instance(self.context, instance, bdms=[], reservations=resvs) def test_quotas_failed_delete(self): instance = self._create_fake_instance_obj() def fake_shutdown_instance(*args, **kwargs): raise test.TestingException() self.stubs.Set(self.compute, '_shutdown_instance', fake_shutdown_instance) resvs = self._ensure_quota_reservations_rolledback(instance) self.assertRaises(test.TestingException, self.compute.terminate_instance, self.context, instance, bdms=[], reservations=resvs) def test_quotas_successful_soft_delete(self): instance = self._create_fake_instance_obj( params=dict(task_state=task_states.SOFT_DELETING)) 
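        # _ensure_quota_reservations_committed (defined above) replays mox
        # with the expectation that QUOTAS.commit is called for these
        # reservations, so a successful soft delete must commit its quota.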
resvs = self._ensure_quota_reservations_committed(instance) self.compute.soft_delete_instance(self.context, instance, reservations=resvs) def test_quotas_failed_soft_delete(self): instance = self._create_fake_instance_obj( params=dict(task_state=task_states.SOFT_DELETING)) def fake_soft_delete(*args, **kwargs): raise test.TestingException() self.stubs.Set(self.compute.driver, 'soft_delete', fake_soft_delete) resvs = self._ensure_quota_reservations_rolledback(instance) self.assertRaises(test.TestingException, self.compute.soft_delete_instance, self.context, instance, reservations=resvs) def test_quotas_destroy_of_soft_deleted_instance(self): instance = self._create_fake_instance_obj( params=dict(vm_state=vm_states.SOFT_DELETED)) # Termination should be successful, but quota reservations # rolled back because the instance was in SOFT_DELETED state. resvs = self._ensure_quota_reservations_rolledback(instance) self.compute.terminate_instance(self.context, instance, bdms=[], reservations=resvs) def _stub_out_resize_network_methods(self): def fake(cls, ctxt, instance, *args, **kwargs): pass self.stubs.Set(network_api.API, 'setup_networks_on_host', fake) self.stubs.Set(network_api.API, 'migrate_instance_start', fake) self.stubs.Set(network_api.API, 'migrate_instance_finish', fake) def _test_finish_resize(self, power_on, resize_instance=True): # Contrived test to ensure finish_resize doesn't raise anything and # also tests resize from ACTIVE or STOPPED state which determines # if the resized instance is powered on or not. vm_state = None if power_on: vm_state = vm_states.ACTIVE else: vm_state = vm_states.STOPPED params = {'vm_state': vm_state} instance = self._create_fake_instance_obj(params) image = {} disk_info = 'fake-disk-info' instance_type = flavors.get_default_flavor() if not resize_instance: old_instance_type = flavors.get_flavor_by_name('m1.tiny') instance_type['root_gb'] = old_instance_type['root_gb'] instance_type['swap'] = old_instance_type['swap'] instance_type['ephemeral_gb'] = old_instance_type['ephemeral_gb'] instance.task_state = task_states.RESIZE_PREP instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], request_spec={}, filter_properties={}, node=None, clean_shutdown=True) instance.task_state = task_states.RESIZE_MIGRATED instance.save() # NOTE(mriedem): make sure prep_resize set old_vm_state correctly sys_meta = instance.system_metadata self.assertIn('old_vm_state', sys_meta) if power_on: self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state']) else: self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state']) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') orig_mig_save = migration.save orig_inst_save = instance.save network_api = self.compute.network_api self.mox.StubOutWithMock(network_api, 'setup_networks_on_host') self.mox.StubOutWithMock(network_api, 'migrate_instance_finish') self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute.driver, 'finish_migration') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(migration, 'save') self.mox.StubOutWithMock(instance, 'save') def _mig_save(): self.assertEqual(migration.status, 'finished') self.assertEqual(vm_state, instance.vm_state) self.assertEqual(task_states.RESIZE_FINISH, instance.task_state) 
self.assertTrue(migration._context.is_admin) orig_mig_save() def _instance_save0(expected_task_state=None): self.assertEqual(task_states.RESIZE_MIGRATED, expected_task_state) self.assertEqual(instance_type['id'], instance.instance_type_id) self.assertEqual(task_states.RESIZE_FINISH, instance.task_state) orig_inst_save(expected_task_state=expected_task_state) def _instance_save1(expected_task_state=None): self.assertEqual(task_states.RESIZE_FINISH, expected_task_state) self.assertEqual(vm_states.RESIZED, instance.vm_state) self.assertIsNone(instance.task_state) self.assertIn('launched_at', instance.obj_what_changed()) orig_inst_save(expected_task_state=expected_task_state) network_api.setup_networks_on_host(self.context, instance, 'fake-mini') network_api.migrate_instance_finish(self.context, mox.IsA(objects.Instance), mox.IsA(dict)) self.compute.network_api.get_instance_nw_info( self.context, instance).AndReturn('fake-nwinfo1') # First save to update old/current flavor and task state exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED) instance.save(**exp_kwargs).WithSideEffects(_instance_save0) self.compute._notify_about_instance_usage( self.context, instance, 'finish_resize.start', network_info='fake-nwinfo1') self.compute._get_instance_block_device_info( self.context, instance, refresh_conn_info=True).AndReturn('fake-bdminfo') # nova.conf sets the default flavor to m1.small and the test # sets the default flavor to m1.tiny so they should be different # which makes this a resize self.compute.driver.finish_migration(self.context, migration, instance, disk_info, 'fake-nwinfo1', mox.IsA(objects.ImageMeta), resize_instance, 'fake-bdminfo', power_on) # Ensure instance status updates is after the migration finish migration.save().WithSideEffects(_mig_save) exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH) instance.save(**exp_kwargs).WithSideEffects(_instance_save1) self.compute._notify_about_instance_usage( self.context, instance, 'finish_resize.end', network_info='fake-nwinfo1') # NOTE(comstud): This actually does the mox.ReplayAll() reservations = self._ensure_quota_reservations_committed(instance) self.compute.finish_resize(self.context, migration=migration, disk_info=disk_info, image=image, instance=instance, reservations=reservations) def test_finish_resize_from_active(self): self._test_finish_resize(power_on=True) def test_finish_resize_from_stopped(self): self._test_finish_resize(power_on=False) def test_finish_resize_without_resize_instance(self): self._test_finish_resize(power_on=True, resize_instance=False) def test_finish_resize_with_volumes(self): """Contrived test to ensure finish_resize doesn't raise anything.""" # create instance instance = self._create_fake_instance_obj() # create volume volume = {'instance_uuid': None, 'device_name': None, 'id': 'fake', 'size': 200, 'attach_status': 'detached'} bdm = objects.BlockDeviceMapping( **{'context': self.context, 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': uuids.volume_id, 'instance_uuid': instance['uuid'], 'device_name': '/dev/vdc'}) bdm.create() # stub out volume attach def fake_volume_get(self, context, volume_id): return volume self.stubs.Set(cinder.API, "get", fake_volume_get) def fake_volume_check_attach(self, context, volume_id, instance): pass self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach) def fake_get_volume_encryption_metadata(self, context, volume_id): return {} self.stubs.Set(cinder.API, 'get_volume_encryption_metadata', 
                       fake_get_volume_encryption_metadata)

        orig_connection_data = {
            'target_discovered': True,
            'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % uuids.volume_id,
            'target_portal': '127.0.0.1:3260',
            'volume_id': uuids.volume_id,
        }
        connection_info = {
            'driver_volume_type': 'iscsi',
            'data': orig_connection_data,
        }

        def fake_init_conn(self, context, volume_id, session):
            return connection_info
        self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)

        def fake_attach(self, context, volume_id, instance_uuid, device_name,
                        mode='rw'):
            volume['instance_uuid'] = instance_uuid
            volume['device_name'] = device_name
        self.stubs.Set(cinder.API, "attach", fake_attach)

        # stub out virt driver attach
        def fake_get_volume_connector(*args, **kwargs):
            return {}
        self.stubs.Set(self.compute.driver, 'get_volume_connector',
                       fake_get_volume_connector)

        def fake_attach_volume(*args, **kwargs):
            pass
        self.stubs.Set(self.compute.driver, 'attach_volume',
                       fake_attach_volume)

        # attach volume to instance
        self.compute.attach_volume(self.context, instance, bdm)

        # assert volume attached correctly
        self.assertEqual(volume['device_name'], '/dev/vdc')
        disk_info = db.block_device_mapping_get_all_by_instance(
            self.context, instance.uuid)
        self.assertEqual(len(disk_info), 1)
        for bdm in disk_info:
            self.assertEqual(bdm['device_name'], volume['device_name'])
            self.assertEqual(bdm['connection_info'],
                             jsonutils.dumps(connection_info))

        # begin resize
        instance_type = flavors.get_default_flavor()
        instance.task_state = task_states.RESIZE_PREP
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                instance_type=instance_type, image={}, reservations=[],
                request_spec={}, filter_properties={}, node=None,
                clean_shutdown=True)

        # fake out detach for prep_resize (and later terminate)
        def fake_terminate_connection(self, context, volume, connector):
            connection_info['data'] = None
        self.stubs.Set(cinder.API, "terminate_connection",
                       fake_terminate_connection)

        self._stub_out_resize_network_methods()

        migration = objects.Migration.get_by_instance_and_status(
            self.context.elevated(), instance.uuid, 'pre-migrating')
        self.compute.resize_instance(self.context, instance=instance,
                migration=migration, image={}, reservations=[],
                instance_type=jsonutils.to_primitive(instance_type),
                clean_shutdown=True)

        # assert bdm is unchanged
        disk_info = db.block_device_mapping_get_all_by_instance(
            self.context, instance.uuid)
        self.assertEqual(len(disk_info), 1)
        for bdm in disk_info:
            self.assertEqual(bdm['device_name'], volume['device_name'])
            cached_connection_info = jsonutils.loads(bdm['connection_info'])
            self.assertEqual(cached_connection_info['data'],
                             orig_connection_data)
        # but connection was terminated
        self.assertIsNone(connection_info['data'])

        # stub out virt driver finish_migration
        def fake(*args, **kwargs):
            pass
        self.stubs.Set(self.compute.driver, 'finish_migration', fake)

        instance.task_state = task_states.RESIZE_MIGRATED
        instance.save()

        reservations = self._ensure_quota_reservations_committed(instance)

        # new initialize connection
        new_connection_data = dict(orig_connection_data)
        new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id
        new_connection_data['target_iqn'] = new_iqn

        def fake_init_conn_with_data(self, context, volume, session):
            connection_info['data'] = new_connection_data
            return connection_info
        self.stubs.Set(cinder.API, "initialize_connection",
                       fake_init_conn_with_data)

        self.compute.finish_resize(self.context,
                migration=migration,
                disk_info={}, image={}, instance=instance,
                reservations=reservations)

        # assert volume attached correctly
disk_info = db.block_device_mapping_get_all_by_instance( self.context, instance['uuid']) self.assertEqual(len(disk_info), 1) for bdm in disk_info: self.assertEqual(bdm['connection_info'], jsonutils.dumps(connection_info)) # stub out detach def fake_detach(self, context, volume_uuid): volume['device_path'] = None volume['instance_uuid'] = None self.stubs.Set(cinder.API, "detach", fake_detach) # clean up self.compute.terminate_instance(self.context, instance, [], []) def test_finish_resize_handles_error(self): # Make sure we don't leave the instance in RESIZE on error. def throw_up(*args, **kwargs): raise test.TestingException() self.stubs.Set(self.compute.driver, 'finish_migration', throw_up) self._stub_out_resize_network_methods() old_flavor_name = 'm1.tiny' instance = self._create_fake_instance_obj(type_name=old_flavor_name) reservations = self._ensure_quota_reservations_rolledback(instance) instance_type = flavors.get_flavor_by_name('m1.small') self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, filter_properties={}, node=None, clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') instance.refresh() instance.task_state = task_states.RESIZE_MIGRATED instance.save() self.assertRaises(test.TestingException, self.compute.finish_resize, self.context, migration=migration, disk_info={}, image={}, instance=instance, reservations=reservations) instance.refresh() self.assertEqual(vm_states.ERROR, instance.vm_state) old_flavor = flavors.get_flavor_by_name(old_flavor_name) self.assertEqual(old_flavor['memory_mb'], instance.memory_mb) self.assertEqual(old_flavor['vcpus'], instance.vcpus) self.assertEqual(old_flavor['root_gb'], instance.root_gb) self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb) self.assertEqual(old_flavor['id'], instance.instance_type_id) self.assertNotEqual(instance_type['id'], instance.instance_type_id) def test_set_instance_info(self): old_flavor_name = 'm1.tiny' new_flavor_name = 'm1.small' instance = self._create_fake_instance_obj(type_name=old_flavor_name) new_flavor = flavors.get_flavor_by_name(new_flavor_name) self.compute._set_instance_info(instance, new_flavor.obj_clone()) self.assertEqual(new_flavor['memory_mb'], instance.memory_mb) self.assertEqual(new_flavor['vcpus'], instance.vcpus) self.assertEqual(new_flavor['root_gb'], instance.root_gb) self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb) self.assertEqual(new_flavor['id'], instance.instance_type_id) def test_rebuild_instance_notification(self): # Ensure notifications on instance migrate/resize. 
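        # (Here the rebuild path is exercised: the test expects an exists
        # notification followed by rebuild.start and rebuild.end, with the
        # payload pointing at the new image_ref_url.)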
old_time = datetime.datetime(2012, 4, 1) cur_time = datetime.datetime(2012, 12, 21, 12, 21) time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time)) inst_ref = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, inst_ref, {}, {}, {}, block_device_mapping=[]) time_fixture.advance_time_delta(cur_time - old_time) fake_notifier.NOTIFICATIONS = [] instance = db.instance_get_by_uuid(self.context, inst_ref['uuid']) orig_sys_metadata = db.instance_system_metadata_get(self.context, inst_ref['uuid']) image_ref = instance["image_ref"] new_image_ref = image_ref + '-new_image_ref' db.instance_update(self.context, inst_ref['uuid'], {'image_ref': new_image_ref}) password = "new_password" inst_ref.task_state = task_states.REBUILDING inst_ref.save() self.compute.rebuild_instance(self.context, inst_ref, image_ref, new_image_ref, injected_files=[], new_pass=password, orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False, on_shared_storage=False) inst_ref.refresh() image_ref_url = glance.generate_image_url(image_ref) new_image_ref_url = glance.generate_image_url(new_image_ref) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.exists') self.assertEqual(msg.payload['image_ref_url'], image_ref_url) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.rebuild.start') self.assertEqual(msg.payload['image_ref_url'], new_image_ref_url) self.assertEqual(msg.payload['image_name'], 'fake_name') msg = fake_notifier.NOTIFICATIONS[2] self.assertEqual(msg.event_type, 'compute.instance.rebuild.end') self.assertEqual(msg.priority, 'INFO') payload = msg.payload self.assertEqual(payload['image_name'], 'fake_name') self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], inst_ref['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) self.assertEqual(payload['launched_at'], utils.strtime(cur_time)) self.assertEqual(payload['image_ref_url'], new_image_ref_url) self.compute.terminate_instance(self.context, inst_ref, [], []) def test_finish_resize_instance_notification(self): # Ensure notifications on instance migrate/resize. 
old_time = datetime.datetime(2012, 4, 1) cur_time = datetime.datetime(2012, 12, 21, 12, 21) time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time)) instance = self._create_fake_instance_obj() new_type = flavors.get_flavor_by_name('m1.small') new_type_id = new_type['id'] flavor_id = new_type['flavorid'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.host = 'foo' instance.task_state = task_states.RESIZE_PREP instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=new_type, image={}, reservations=[], request_spec={}, filter_properties={}, node=None, clean_shutdown=True) self._stub_out_resize_network_methods() migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, instance_type=new_type, reservations=[], clean_shutdown=True) time_fixture.advance_time_delta(cur_time - old_time) fake_notifier.NOTIFICATIONS = [] self.compute.finish_resize(self.context, migration=migration, reservations=[], disk_info={}, image={}, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.finish_resize.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.finish_resize.end') self.assertEqual(msg.priority, 'INFO') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance.uuid) self.assertEqual(payload['instance_type'], 'm1.small') self.assertEqual(str(payload['instance_type_id']), str(new_type_id)) self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) self.assertEqual(payload['launched_at'], utils.strtime(cur_time)) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_resize_instance_notification(self): # Ensure notifications on instance migrate/resize. 
old_time = datetime.datetime(2012, 4, 1) cur_time = datetime.datetime(2012, 12, 21, 12, 21) time_fixture = self.useFixture(utils_fixture.TimeFixture(old_time)) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) time_fixture.advance_time_delta(cur_time - old_time) fake_notifier.NOTIFICATIONS = [] instance.host = 'foo' instance.task_state = task_states.RESIZE_PREP instance.save() instance_type = flavors.get_default_flavor() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], request_spec={}, filter_properties={}, node=None, clean_shutdown=True) db.migration_get_by_instance_and_status(self.context.elevated(), instance.uuid, 'pre-migrating') self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.exists') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.resize.prep.start') msg = fake_notifier.NOTIFICATIONS[2] self.assertEqual(msg.event_type, 'compute.instance.resize.prep.end') self.assertEqual(msg.priority, 'INFO') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance.uuid) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_prep_resize_instance_migration_error_on_none_host(self): """Ensure prep_resize raises a migration error if destination host is not defined """ instance = self._create_fake_instance_obj() reservations = self._ensure_quota_reservations_rolledback(instance) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.host = None instance.save() instance_type = flavors.get_default_flavor() self.assertRaises(exception.MigrationError, self.compute.prep_resize, self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, filter_properties={}, node=None, clean_shutdown=True) self.compute.terminate_instance(self.context, instance, [], []) def test_resize_instance_driver_error(self): # Ensure instance status set to Error on resize error. 
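        # migrate_disk_and_power_off is stubbed below to raise; the error
        # path does not operate on the passed-in object, so the test
        # refreshes the instance from the DB before asserting it is in ERROR.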
def throw_up(*args, **kwargs): raise test.TestingException() self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off', throw_up) instance = self._create_fake_instance_obj() instance_type = flavors.get_default_flavor() reservations = self._ensure_quota_reservations_rolledback(instance) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, filter_properties={}, node=None, clean_shutdown=True) instance.task_state = task_states.RESIZE_PREP instance.save() migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') # verify self.assertRaises(test.TestingException, self.compute.resize_instance, self.context, instance=instance, migration=migration, image={}, reservations=reservations, instance_type=jsonutils.to_primitive(instance_type), clean_shutdown=True) # NOTE(comstud): error path doesn't use objects, so our object # is not updated. Refresh and compare against the DB. instance.refresh() self.assertEqual(instance.vm_state, vm_states.ERROR) self.compute.terminate_instance(self.context, instance, [], []) def test_resize_instance_driver_rollback(self): # Ensure instance status set to Running after rollback. def throw_up(*args, **kwargs): raise exception.InstanceFaultRollback(test.TestingException()) self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off', throw_up) instance = self._create_fake_instance_obj() instance_type = flavors.get_default_flavor() reservations = self._ensure_quota_reservations_rolledback(instance) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, filter_properties={}, node=None, clean_shutdown=True) instance.task_state = task_states.RESIZE_PREP instance.save() migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') self.assertRaises(test.TestingException, self.compute.resize_instance, self.context, instance=instance, migration=migration, image={}, reservations=reservations, instance_type=jsonutils.to_primitive(instance_type), clean_shutdown=True) # NOTE(comstud): error path doesn't use objects, so our object # is not updated. Refresh and compare against the DB. instance.refresh() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertIsNone(instance.task_state) self.compute.terminate_instance(self.context, instance, [], []) def _test_resize_instance(self, clean_shutdown=True): # Ensure instance can be migrated/resized. 
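        # Called with clean_shutdown=True and False by the two
        # test_resize_instance* wrappers below; the values handed to the
        # driver power-off are checked via the _get_power_off_values mock.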
instance = self._create_fake_instance_obj() instance_type = flavors.get_default_flavor() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=[], request_spec={}, filter_properties={}, node=None, clean_shutdown=True) # verify 'old_vm_state' was set on system_metadata instance.refresh() sys_meta = instance.system_metadata self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state']) self._stub_out_resize_network_methods() instance.task_state = task_states.RESIZE_PREP instance.save() migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') with test.nested( mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value='fake_bdms'), mock.patch.object( self.compute, '_get_instance_block_device_info', return_value='fake_bdinfo'), mock.patch.object(self.compute, '_terminate_volume_connections'), mock.patch.object(self.compute, '_get_power_off_values', return_value=(1, 2)) ) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo, mock_terminate_vol_conn, mock_get_power_off_values): self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, reservations=[], instance_type=jsonutils.to_primitive(instance_type), clean_shutdown=clean_shutdown) mock_get_instance_vol_bdinfo.assert_called_once_with( self.context, instance, bdms='fake_bdms') mock_terminate_vol_conn.assert_called_once_with(self.context, instance, 'fake_bdms') mock_get_power_off_values.assert_called_once_with(self.context, instance, clean_shutdown) self.assertEqual(migration.dest_compute, instance.host) self.compute.terminate_instance(self.context, instance, [], []) def test_resize_instance(self): self._test_resize_instance() def test_resize_instance_forced_shutdown(self): self._test_resize_instance(clean_shutdown=False) def _test_confirm_resize(self, power_on, numa_topology=None): # Common test case method for confirm_resize def fake(*args, **kwargs): pass def fake_confirm_migration_driver(*args, **kwargs): # Confirm the instance uses the new type in finish_resize self.assertEqual('3', instance.flavor.flavorid) old_vm_state = None p_state = None if power_on: old_vm_state = vm_states.ACTIVE p_state = power_state.RUNNING else: old_vm_state = vm_states.STOPPED p_state = power_state.SHUTDOWN params = {'vm_state': old_vm_state, 'power_state': p_state} instance = self._create_fake_instance_obj(params) self.flags(allow_resize_to_same_host=True) self.stubs.Set(self.compute.driver, 'finish_migration', fake) self.stubs.Set(self.compute.driver, 'confirm_migration', fake_confirm_migration_driver) self._stub_out_resize_network_methods() reservations = self._ensure_quota_reservations_committed(instance) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Confirm the instance size before the resize starts instance.refresh() instance_type_ref = db.flavor_get(self.context, instance.instance_type_id) self.assertEqual(instance_type_ref['flavorid'], '1') instance.vm_state = old_vm_state instance.power_state = p_state instance.numa_topology = numa_topology instance.save() new_instance_type_ref = flavors.get_flavor_by_flavor_id(3) self.compute.prep_resize(self.context, instance=instance, instance_type=new_instance_type_ref, image={}, reservations=reservations, request_spec={}, filter_properties={}, node=None, 
clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') migration_context = objects.MigrationContext.get_by_instance_uuid( self.context.elevated(), instance.uuid) self.assertIsInstance(migration_context.old_numa_topology, numa_topology.__class__) self.assertIsNone(migration_context.new_numa_topology) # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata sys_meta = instance.system_metadata self.assertEqual(old_vm_state, sys_meta['old_vm_state']) instance.task_state = task_states.RESIZE_PREP instance.save() self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, reservations=[], instance_type=new_instance_type_ref, clean_shutdown=True) self.compute.finish_resize(self.context, migration=migration, reservations=[], disk_info={}, image={}, instance=instance) # Prove that the instance size is now the new size instance_type_ref = db.flavor_get(self.context, instance.instance_type_id) self.assertEqual(instance_type_ref['flavorid'], '3') # Prove that the NUMA topology has also been updated to that of the new # flavor - meaning None self.assertIsNone(instance.numa_topology) # Finally, confirm the resize and verify the new flavor is applied instance.task_state = None instance.save() self.compute.confirm_resize(self.context, instance=instance, reservations=reservations, migration=migration) instance.refresh() instance_type_ref = db.flavor_get(self.context, instance.instance_type_id) self.assertEqual(instance_type_ref['flavorid'], '3') self.assertEqual('fake-mini', migration.source_compute) self.assertEqual(old_vm_state, instance.vm_state) self.assertIsNone(instance.task_state) self.assertIsNone(instance.migration_context) self.assertEqual(p_state, instance.power_state) self.compute.terminate_instance(self.context, instance, [], []) def test_confirm_resize_from_active(self): self._test_confirm_resize(power_on=True) def test_confirm_resize_from_stopped(self): self._test_confirm_resize(power_on=False) def test_confirm_resize_with_migration_context(self): numa_topology = ( test_instance_numa_topology.get_fake_obj_numa_topology( self.context)) self._test_confirm_resize(power_on=True, numa_topology=numa_topology) def test_confirm_resize_with_numa_topology_and_cpu_pinning(self): instance = self._create_fake_instance_obj() instance.old_flavor = instance.flavor instance.new_flavor = instance.flavor # we have two hosts with the same NUMA topologies. # now instance use two cpus from node_0 (cpu1 and cpu2) on current host old_inst_topology = objects.InstanceNUMATopology( instance_uuid=instance.uuid, cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=512, pagesize=2048, cpu_policy=obj_fields.CPUAllocationPolicy.DEDICATED, cpu_pinning={'0': 1, '1': 2}) ]) # instance will use two cpus from node_1 (cpu3 and cpu4) # on *some other host* new_inst_topology = objects.InstanceNUMATopology( instance_uuid=instance.uuid, cells=[ objects.InstanceNUMACell( id=1, cpuset=set([3, 4]), memory=512, pagesize=2048, cpu_policy=obj_fields.CPUAllocationPolicy.DEDICATED, cpu_pinning={'0': 3, '1': 4}) ]) instance.numa_topology = old_inst_topology # instance placed in node_0 on current host. 
        # cpu1 and cpu2 from node_0 are used
        cell1 = objects.NUMACell(
            id=0, cpuset=set([1, 2]), pinned_cpus=set([1, 2]), memory=512,
            pagesize=2048, cpu_usage=2, memory_usage=0, siblings=[],
            mempages=[objects.NUMAPagesTopology(
                size_kb=2048, total=256, used=256)])
        # as instance placed in node_0 all cpus from node_1 (cpu3 and cpu4)
        # are free (on current host)
        cell2 = objects.NUMACell(
            id=1, cpuset=set([3, 4]), pinned_cpus=set(), memory=512,
            pagesize=2048, memory_usage=0, cpu_usage=0, siblings=[],
            mempages=[objects.NUMAPagesTopology(
                size_kb=2048, total=256, used=0)])
        host_numa_topology = objects.NUMATopology(cells=[cell1, cell2])

        migration = objects.Migration(context=self.context.elevated())
        migration.instance_uuid = instance.uuid
        migration.status = 'finished'
        migration.migration_type = 'migration'
        migration.source_node = NODENAME
        migration.create()

        migration_context = objects.MigrationContext()
        migration_context.migration_id = migration.id
        migration_context.old_numa_topology = old_inst_topology
        migration_context.new_numa_topology = new_inst_topology

        instance.migration_context = migration_context
        instance.vm_state = vm_states.RESIZED
        instance.system_metadata = {}
        instance.save()

        self.rt.tracked_migrations[instance.uuid] = (migration,
                                                     instance.flavor)
        self.rt.compute_node.numa_topology = jsonutils.dumps(
            host_numa_topology.obj_to_primitive())

        with mock.patch.object(self.compute.network_api,
                               'setup_networks_on_host'):
            self.compute.confirm_resize(self.context, instance=instance,
                                        migration=migration, reservations=[])

        instance.refresh()
        self.assertEqual(vm_states.ACTIVE, instance['vm_state'])

        updated_topology = objects.NUMATopology.obj_from_primitive(
            jsonutils.loads(self.rt.compute_node.numa_topology))

        # after confirming resize all cpus on current host must be free
        self.assertEqual(2, len(updated_topology.cells))
        for cell in updated_topology.cells:
            self.assertEqual(0, cell.cpu_usage)
            self.assertEqual(set(), cell.pinned_cpus)

    def _test_finish_revert_resize(self, power_on,
                                   remove_old_vm_state=False,
                                   numa_topology=None):
        """Convenience method that does most of the work for the
        test_finish_revert_resize tests.

        :param power_on -- True if testing resize from ACTIVE state, False if
                           testing resize from STOPPED state.
        :param remove_old_vm_state -- True if testing a case where the
                                      'old_vm_state' system_metadata is not
                                      present when the finish_revert_resize
                                      method is called.
""" def fake(*args, **kwargs): pass def fake_finish_revert_migration_driver(*args, **kwargs): # Confirm the instance uses the old type in finish_revert_resize inst = args[1] self.assertEqual('1', inst.flavor.flavorid) old_vm_state = None if power_on: old_vm_state = vm_states.ACTIVE else: old_vm_state = vm_states.STOPPED params = {'vm_state': old_vm_state} instance = self._create_fake_instance_obj(params) self.stubs.Set(self.compute.driver, 'finish_migration', fake) self.stubs.Set(self.compute.driver, 'finish_revert_migration', fake_finish_revert_migration_driver) self._stub_out_resize_network_methods() reservations = self._ensure_quota_reservations_committed(instance) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.refresh() instance_type_ref = db.flavor_get(self.context, instance.instance_type_id) self.assertEqual(instance_type_ref['flavorid'], '1') old_vm_state = instance['vm_state'] instance.host = 'foo' instance.vm_state = old_vm_state instance.numa_topology = numa_topology instance.save() new_instance_type_ref = flavors.get_flavor_by_flavor_id(3) self.compute.prep_resize(self.context, instance=instance, instance_type=new_instance_type_ref, image={}, reservations=reservations, request_spec={}, filter_properties={}, node=None, clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') migration_context = objects.MigrationContext.get_by_instance_uuid( self.context.elevated(), instance.uuid) self.assertIsInstance(migration_context.old_numa_topology, numa_topology.__class__) # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata sys_meta = instance.system_metadata self.assertEqual(old_vm_state, sys_meta['old_vm_state']) instance.task_state = task_states.RESIZE_PREP instance.save() self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, reservations=[], instance_type=new_instance_type_ref, clean_shutdown=True) self.compute.finish_resize(self.context, migration=migration, reservations=[], disk_info={}, image={}, instance=instance) # Prove that the instance size is now the new size instance_type_ref = flavors.get_flavor_by_flavor_id(3) self.assertEqual(instance_type_ref['flavorid'], '3') # Prove that the NUMA topology has also been updated to that of the new # flavor - meaning None self.assertIsNone(instance.numa_topology) instance.task_state = task_states.RESIZE_REVERTING instance.save() self.compute.revert_resize(self.context, migration=migration, instance=instance, reservations=reservations) instance.refresh() if remove_old_vm_state: # need to wipe out the old_vm_state from system_metadata # before calling finish_revert_resize sys_meta = instance.system_metadata sys_meta.pop('old_vm_state') # Have to reset for save() to work instance.system_metadata = sys_meta instance.save() self.compute.finish_revert_resize(self.context, migration=migration, instance=instance, reservations=reservations) self.assertIsNone(instance.task_state) instance_type_ref = db.flavor_get(self.context, instance['instance_type_id']) self.assertEqual(instance_type_ref['flavorid'], '1') self.assertEqual(instance.host, migration.source_compute) self.assertEqual(migration.dest_compute, migration.source_compute) self.assertIsInstance(instance.numa_topology, numa_topology.__class__) if remove_old_vm_state: self.assertEqual(vm_states.ACTIVE, instance.vm_state) else: self.assertEqual(old_vm_state, instance.vm_state) def 
test_finish_revert_resize_from_active(self): self._test_finish_revert_resize(power_on=True) def test_finish_revert_resize_from_stopped(self): self._test_finish_revert_resize(power_on=False) def test_finish_revert_resize_from_stopped_remove_old_vm_state(self): # in this case we resize from STOPPED but end up with ACTIVE # because the old_vm_state value is not present in # finish_revert_resize self._test_finish_revert_resize(power_on=False, remove_old_vm_state=True) def test_finish_revert_resize_migration_context(self): numa_topology = ( test_instance_numa_topology.get_fake_obj_numa_topology( self.context)) self._test_finish_revert_resize(power_on=True, numa_topology=numa_topology) def test_get_by_flavor_id(self): flavor_type = flavors.get_flavor_by_flavor_id(1) self.assertEqual(flavor_type['name'], 'm1.tiny') def test_resize_instance_handles_migration_error(self): # Ensure vm_state is ERROR when error occurs. def raise_migration_failure(*args): raise test.TestingException() self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off', raise_migration_failure) instance = self._create_fake_instance_obj() reservations = self._ensure_quota_reservations_rolledback(instance) instance_type = flavors.get_default_flavor() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.host = 'foo' instance.save() self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, image={}, reservations=reservations, request_spec={}, filter_properties={}, node=None, clean_shutdown=True) migration = objects.Migration.get_by_instance_and_status( self.context.elevated(), instance.uuid, 'pre-migrating') instance.task_state = task_states.RESIZE_PREP instance.save() self.assertRaises(test.TestingException, self.compute.resize_instance, self.context, instance=instance, migration=migration, image={}, reservations=reservations, instance_type=jsonutils.to_primitive(instance_type), clean_shutdown=True) # NOTE(comstud): error path doesn't use objects, so our object # is not updated. Refresh and compare against the DB. instance.refresh() self.assertEqual(instance.vm_state, vm_states.ERROR) self.compute.terminate_instance(self.context, instance, [], []) def test_pre_live_migration_instance_has_no_fixed_ip(self): # Confirm that no exception is raised if there is no fixed ip on # pre_live_migration instance = self._create_fake_instance_obj() c = context.get_admin_context() self.mox.ReplayAll() self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance), {'block_device_mapping': []}, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) def test_pre_live_migration_works_correctly(self): # Confirm setup_compute_volume is called when volume is mounted. 
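        # (In practice the test verifies that driver.pre_live_migration,
        # ensure_filtering_rules_for_instance and setup_networks_on_host are
        # all called, and that live_migration.pre.start/.end notifications
        # are emitted.)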
def stupid(*args, **kwargs): return fake_network.fake_get_instance_nw_info(self) self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', stupid) # creating instance testdata instance = self._create_fake_instance_obj({'host': 'dummy'}) c = context.get_admin_context() nw_info = fake_network.fake_get_instance_nw_info(self) # creating mocks self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration') self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance), {'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': []}, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.StubOutWithMock(self.compute.driver, 'ensure_filtering_rules_for_instance') self.compute.driver.ensure_filtering_rules_for_instance( mox.IsA(instance), nw_info) self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.compute.network_api.setup_networks_on_host(c, instance, self.compute.host) fake_notifier.NOTIFICATIONS = [] # start test self.mox.ReplayAll() migrate_data = {'is_shared_instance_path': False} ret = self.compute.pre_live_migration(c, instance=instance, block_migration=False, disk=None, migrate_data=migrate_data) self.assertIsNone(ret) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.live_migration.pre.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.live_migration.pre.end') # cleanup db.instance_destroy(c, instance['uuid']) @mock.patch('nova.objects.Migration.save') def test_live_migration_exception_rolls_back(self, mock_save): # Confirm exception when pre_live_migration fails. c = context.get_admin_context() instance = self._create_fake_instance_obj( {'host': 'src_host', 'task_state': task_states.MIGRATING}) updated_instance = self._create_fake_instance_obj( {'host': 'fake-dest-host'}) dest_host = updated_instance['host'] fake_bdms = [ objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( {'volume_id': uuids.volume_id_1, 'source_type': 'volume', 'destination_type': 'volume'})), objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( {'volume_id': uuids.volume_id_2, 'source_type': 'volume', 'destination_type': 'volume'})) ] migrate_data = migrate_data_obj.XenapiLiveMigrateData( block_migration=True) # creating mocks self.mox.StubOutWithMock(self.compute.driver, 'get_instance_disk_info') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'pre_live_migration') self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'remove_volume_connection') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'rollback_live_migration_at_destination') block_device_info = { 'swap': None, 'ephemerals': [], 'block_device_mapping': [], 'root_device_name': None} self.compute.driver.get_instance_disk_info( instance, block_device_info=block_device_info).AndReturn('fake_disk') self.compute.compute_rpcapi.pre_live_migration(c, instance, True, 'fake_disk', dest_host, migrate_data).AndRaise(test.TestingException()) self.compute.network_api.setup_networks_on_host(c, instance, self.compute.host) objects.BlockDeviceMappingList.get_by_instance_uuid(c, instance.uuid).MultipleTimes().AndReturn(fake_bdms) self.compute.compute_rpcapi.remove_volume_connection( c, uuids.volume_id_1, instance, dest_host) 
self.compute.compute_rpcapi.remove_volume_connection( c, uuids.volume_id_2, instance, dest_host) self.compute.compute_rpcapi.rollback_live_migration_at_destination( c, instance, dest_host, destroy_disks=True, migrate_data=mox.IsA(migrate_data_obj.LiveMigrateData)) # start test self.mox.ReplayAll() migration = objects.Migration() self.assertRaises(test.TestingException, self.compute.live_migration, c, dest=dest_host, block_migration=True, instance=instance, migration=migration, migrate_data=migrate_data) instance.refresh() self.assertEqual('src_host', instance.host) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertEqual('error', migration.status) @mock.patch.object(compute_utils, 'EventReporter') @mock.patch('nova.objects.Migration.save') def test_live_migration_works_correctly(self, mock_save, event_mock): # Confirm live_migration() works as expected correctly. # creating instance testdata c = context.get_admin_context() instance = self._create_fake_instance_obj(context=c) instance.host = self.compute.host dest = 'desthost' migrate_data = migrate_data_obj.LibvirtLiveMigrateData( is_shared_instance_path=False, is_shared_block_storage=False) self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'pre_live_migration') self.compute.compute_rpcapi.pre_live_migration( c, instance, False, None, dest, migrate_data).AndReturn( migrate_data) self.mox.StubOutWithMock(self.compute.network_api, 'migrate_instance_start') migration = {'source_compute': instance['host'], 'dest_compute': dest} self.compute.network_api.migrate_instance_start(c, instance, migration) self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'post_live_migration_at_destination') self.compute.compute_rpcapi.post_live_migration_at_destination( c, instance, False, dest) self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.mox.StubOutWithMock(self.compute.instance_events, 'clear_events_for_instance') self.compute.instance_events.clear_events_for_instance( mox.IgnoreArg()) # start test self.mox.ReplayAll() migration = objects.Migration() ret = self.compute.live_migration(c, dest=dest, instance=instance, block_migration=False, migration=migration, migrate_data=migrate_data) self.assertIsNone(ret) event_mock.assert_called_with( c, 'compute_live_migration', instance.uuid) # cleanup instance.destroy() self.assertEqual('completed', migration.status) def test_post_live_migration_no_shared_storage_working_correctly(self): """Confirm post_live_migration() works correctly as expected for non shared storage migration. """ # Create stubs result = {} # No share storage live migration don't need to destroy at source # server because instance has been migrated to destination, but a # cleanup for block device and network are needed. 
def fakecleanup(*args, **kwargs): result['cleanup'] = True self.stubs.Set(self.compute.driver, 'cleanup', fakecleanup) dest = 'desthost' srchost = self.compute.host # creating testdata c = context.get_admin_context() instance = self._create_fake_instance_obj({ 'host': srchost, 'state_description': 'migrating', 'state': power_state.PAUSED, 'task_state': task_states.MIGRATING, 'power_state': power_state.PAUSED}) # creating mocks self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') self.compute.driver.unfilter_instance(instance, []) self.mox.StubOutWithMock(self.compute.network_api, 'migrate_instance_start') migration = {'source_compute': srchost, 'dest_compute': dest, } self.compute.network_api.migrate_instance_start(c, instance, migration) self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'post_live_migration_at_destination') self.compute.compute_rpcapi.post_live_migration_at_destination( c, instance, False, dest) self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.mox.StubOutWithMock(self.compute.instance_events, 'clear_events_for_instance') self.compute.instance_events.clear_events_for_instance( mox.IgnoreArg()) # start test self.mox.ReplayAll() migrate_data = objects.LibvirtLiveMigrateData( is_shared_instance_path=False, is_shared_block_storage=False, block_migration=False) self.compute._post_live_migration(c, instance, dest, migrate_data=migrate_data) self.assertIn('cleanup', result) self.assertTrue(result['cleanup']) def test_post_live_migration_working_correctly(self): # Confirm post_live_migration() works as expected correctly. dest = 'desthost' srchost = self.compute.host # creating testdata c = context.get_admin_context() instance = self._create_fake_instance_obj({ 'host': srchost, 'state_description': 'migrating', 'state': power_state.PAUSED}, context=c) instance.update({'task_state': task_states.MIGRATING, 'power_state': power_state.PAUSED}) instance.save() migration_obj = objects.Migration() migrate_data = migrate_data_obj.LiveMigrateData( migration=migration_obj) # creating mocks with test.nested( mock.patch.object(self.compute.driver, 'post_live_migration'), mock.patch.object(self.compute.driver, 'unfilter_instance'), mock.patch.object(self.compute.network_api, 'migrate_instance_start'), mock.patch.object(self.compute.compute_rpcapi, 'post_live_migration_at_destination'), mock.patch.object(self.compute.driver, 'post_live_migration_at_source'), mock.patch.object(self.compute.network_api, 'setup_networks_on_host'), mock.patch.object(self.compute.instance_events, 'clear_events_for_instance'), mock.patch.object(self.compute, 'update_available_resource'), mock.patch.object(migration_obj, 'save'), ) as ( post_live_migration, unfilter_instance, migrate_instance_start, post_live_migration_at_destination, post_live_migration_at_source, setup_networks_on_host, clear_events, update_available_resource, mig_save ): self.compute._post_live_migration(c, instance, dest, migrate_data=migrate_data) post_live_migration.assert_has_calls([ mock.call(c, instance, {'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': []}, migrate_data)]) unfilter_instance.assert_has_calls([mock.call(instance, [])]) migration = {'source_compute': srchost, 'dest_compute': dest, } migrate_instance_start.assert_has_calls([ mock.call(c, instance, migration)]) post_live_migration_at_destination.assert_has_calls([ mock.call(c, instance, False, dest)]) post_live_migration_at_source.assert_has_calls( [mock.call(c, instance, [])]) 
clear_events.assert_called_once_with(instance) update_available_resource.assert_has_calls([mock.call(c)]) self.assertEqual('completed', migration_obj.status) mig_save.assert_called_once_with() def test_post_live_migration_terminate_volume_connections(self): c = context.get_admin_context() instance = self._create_fake_instance_obj({ 'host': self.compute.host, 'state_description': 'migrating', 'state': power_state.PAUSED}, context=c) instance.update({'task_state': task_states.MIGRATING, 'power_state': power_state.PAUSED}) instance.save() bdms = block_device_obj.block_device_make_list(c, [fake_block_device.FakeDbBlockDeviceDict({ 'source_type': 'blank', 'guest_format': None, 'destination_type': 'local'}), fake_block_device.FakeDbBlockDeviceDict({ 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': uuids.volume_id}), ]) with test.nested( mock.patch.object(self.compute.network_api, 'migrate_instance_start'), mock.patch.object(self.compute.compute_rpcapi, 'post_live_migration_at_destination'), mock.patch.object(self.compute.network_api, 'setup_networks_on_host'), mock.patch.object(self.compute.instance_events, 'clear_events_for_instance'), mock.patch.object(self.compute, '_get_instance_block_device_info'), mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid'), mock.patch.object(self.compute.driver, 'get_volume_connector'), mock.patch.object(cinder.API, 'terminate_connection') ) as ( migrate_instance_start, post_live_migration_at_destination, setup_networks_on_host, clear_events_for_instance, get_instance_volume_block_device_info, get_by_instance_uuid, get_volume_connector, terminate_connection ): get_by_instance_uuid.return_value = bdms get_volume_connector.return_value = 'fake-connector' self.compute._post_live_migration(c, instance, 'dest_host') terminate_connection.assert_called_once_with( c, uuids.volume_id, 'fake-connector') @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') def test_rollback_live_migration(self, mock_bdms): c = context.get_admin_context() instance = mock.MagicMock() migration = mock.MagicMock() migrate_data = {'migration': migration} mock_bdms.return_value = [] @mock.patch.object(self.compute, '_live_migration_cleanup_flags') @mock.patch.object(self.compute, 'network_api') def _test(mock_nw_api, mock_lmcf): mock_lmcf.return_value = False, False self.compute._rollback_live_migration(c, instance, 'foo', False, migrate_data=migrate_data) mock_nw_api.setup_networks_on_host.assert_called_once_with( c, instance, self.compute.host) _test() self.assertEqual('error', migration.status) self.assertEqual(0, instance.progress) migration.save.assert_called_once_with() @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') def test_rollback_live_migration_set_migration_status(self, mock_bdms): c = context.get_admin_context() instance = mock.MagicMock() migration = mock.MagicMock() migrate_data = {'migration': migration} mock_bdms.return_value = [] @mock.patch.object(self.compute, '_live_migration_cleanup_flags') @mock.patch.object(self.compute, 'network_api') def _test(mock_nw_api, mock_lmcf): mock_lmcf.return_value = False, False self.compute._rollback_live_migration(c, instance, 'foo', False, migrate_data=migrate_data, migration_status='fake') mock_nw_api.setup_networks_on_host.assert_called_once_with( c, instance, self.compute.host) _test() self.assertEqual('fake', migration.status) migration.save.assert_called_once_with() def test_rollback_live_migration_at_destination_correctly(self): # creating instance 
testdata c = context.get_admin_context() instance = self._create_fake_instance_obj({'host': 'dummy'}) fake_notifier.NOTIFICATIONS = [] self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') self.compute.network_api.setup_networks_on_host(c, instance, self.compute.host, teardown=True) self.mox.StubOutWithMock(self.compute.driver, 'rollback_live_migration_at_destination') self.compute.driver.rollback_live_migration_at_destination(c, instance, [], {'swap': None, 'ephemerals': [], 'root_device_name': None, 'block_device_mapping': []}, destroy_disks=True, migrate_data=None) # start test self.mox.ReplayAll() ret = self.compute.rollback_live_migration_at_destination(c, instance=instance, destroy_disks=True, migrate_data=None) self.assertIsNone(ret) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.live_migration.rollback.dest.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'compute.instance.live_migration.rollback.dest.end') @mock.patch('nova.network.api.API.setup_networks_on_host', side_effect=test.TestingException) @mock.patch('nova.virt.driver.ComputeDriver.' 'rollback_live_migration_at_destination') @mock.patch('nova.objects.migrate_data.LiveMigrateData.' 'detect_implementation') def test_rollback_live_migration_at_destination_network_fails( self, mock_detect, mock_rollback, net_mock): c = context.get_admin_context() instance = self._create_fake_instance_obj() self.assertRaises(test.TestingException, self.compute.rollback_live_migration_at_destination, c, instance, destroy_disks=True, migrate_data={}) mock_rollback.assert_called_once_with( c, instance, mock.ANY, mock.ANY, destroy_disks=True, migrate_data=mock_detect.return_value) def test_run_kill_vm(self): # Detect when a vm is terminated behind the scenes. 
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instances = db.instance_get_all(self.context) LOG.info("Running instances: %s", instances) self.assertEqual(len(instances), 1) instance_uuid = instances[0]['uuid'] self.compute.driver._test_remove_vm(instance_uuid) # Force the compute manager to do its periodic poll ctxt = context.get_admin_context() self.compute._sync_power_states(ctxt) instances = db.instance_get_all(self.context) LOG.info("After force-killing instances: %s", instances) self.assertEqual(len(instances), 1) self.assertIsNone(instances[0]['task_state']) def _fill_fault(self, values): extra = {x: None for x in ['created_at', 'deleted_at', 'updated_at', 'deleted']} extra['id'] = 1 extra['details'] = '' extra.update(values) return extra def test_add_instance_fault(self): instance = self._create_fake_instance_obj() exc_info = None def fake_db_fault_create(ctxt, values): self.assertIn('raise NotImplementedError', values['details']) del values['details'] expected = { 'code': 500, 'message': 'test', 'instance_uuid': instance['uuid'], 'host': self.compute.host } self.assertEqual(expected, values) return self._fill_fault(expected) try: raise NotImplementedError('test') except NotImplementedError: exc_info = sys.exc_info() self.stub_out('nova.db.instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() compute_utils.add_instance_fault_from_exc(ctxt, instance, NotImplementedError('test'), exc_info) def test_add_instance_fault_with_remote_error(self): instance = self._create_fake_instance_obj() exc_info = None raised_exc = None def fake_db_fault_create(ctxt, values): global exc_info global raised_exc self.assertIn('raise messaging.RemoteError', values['details']) del values['details'] expected = { 'code': 500, 'instance_uuid': instance['uuid'], 'message': 'Remote error: test My Test Message\nNone.', 'host': self.compute.host } self.assertEqual(expected, values) return self._fill_fault(expected) try: raise messaging.RemoteError('test', 'My Test Message') except messaging.RemoteError as exc: raised_exc = exc exc_info = sys.exc_info() self.stub_out('nova.db.instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() compute_utils.add_instance_fault_from_exc(ctxt, instance, raised_exc, exc_info) def test_add_instance_fault_user_error(self): instance = self._create_fake_instance_obj() exc_info = None def fake_db_fault_create(ctxt, values): expected = { 'code': 400, 'message': 'fake details', 'details': '', 'instance_uuid': instance['uuid'], 'host': self.compute.host } self.assertEqual(expected, values) return self._fill_fault(expected) user_exc = exception.Invalid('fake details', code=400) try: raise user_exc except exception.Invalid: exc_info = sys.exc_info() self.stub_out('nova.db.instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() compute_utils.add_instance_fault_from_exc(ctxt, instance, user_exc, exc_info) def test_add_instance_fault_no_exc_info(self): instance = self._create_fake_instance_obj() def fake_db_fault_create(ctxt, values): expected = { 'code': 500, 'message': 'test', 'details': '', 'instance_uuid': instance['uuid'], 'host': self.compute.host } self.assertEqual(expected, values) return self._fill_fault(expected) self.stub_out('nova.db.instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() compute_utils.add_instance_fault_from_exc(ctxt, instance, NotImplementedError('test')) def 
test_add_instance_fault_long_message(self): instance = self._create_fake_instance_obj() message = 300 * 'a' def fake_db_fault_create(ctxt, values): expected = { 'code': 500, 'message': message[:255], 'details': '', 'instance_uuid': instance['uuid'], 'host': self.compute.host } self.assertEqual(expected, values) return self._fill_fault(expected) self.stub_out('nova.db.instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() compute_utils.add_instance_fault_from_exc(ctxt, instance, NotImplementedError(message)) def test_add_instance_fault_with_message(self): instance = self._create_fake_instance_obj() exc_info = None def fake_db_fault_create(ctxt, values): self.assertIn('raise NotImplementedError', values['details']) del values['details'] expected = { 'code': 500, 'message': 'hoge', 'instance_uuid': instance['uuid'], 'host': self.compute.host } self.assertEqual(expected, values) return self._fill_fault(expected) try: raise NotImplementedError('test') except NotImplementedError: exc_info = sys.exc_info() self.stub_out('nova.db.instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() compute_utils.add_instance_fault_from_exc(ctxt, instance, NotImplementedError('test'), exc_info, fault_message='hoge') def _test_cleanup_running(self, action): admin_context = context.get_admin_context() deleted_at = (timeutils.utcnow() - datetime.timedelta(hours=1, minutes=5)) instance1 = self._create_fake_instance_obj({"deleted_at": deleted_at, "deleted": True}) instance2 = self._create_fake_instance_obj({"deleted_at": deleted_at, "deleted": True}) self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver') self.compute._get_instances_on_driver( admin_context, {'deleted': True, 'soft_deleted': False, 'host': self.compute.host}).AndReturn([instance1, instance2]) self.flags(running_deleted_instance_timeout=3600, running_deleted_instance_action=action) return admin_context, instance1, instance2 def test_cleanup_running_deleted_instances_reap(self): ctxt, inst1, inst2 = self._test_cleanup_running('reap') bdms = block_device_obj.block_device_make_list(ctxt, []) self.mox.StubOutWithMock(self.compute, "_shutdown_instance") self.mox.StubOutWithMock(objects.BlockDeviceMappingList, "get_by_instance_uuid") # Simulate an error and make sure cleanup proceeds with next instance. 
self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\ AndRaise(test.TestingException) objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt, inst1.uuid, use_slave=True).AndReturn(bdms) objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt, inst2.uuid, use_slave=True).AndReturn(bdms) self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\ AndReturn(None) self.mox.StubOutWithMock(self.compute, "_cleanup_volumes") self.compute._cleanup_volumes(ctxt, inst1['uuid'], bdms).\ AndReturn(None) self.mox.ReplayAll() self.compute._cleanup_running_deleted_instances(ctxt) def test_cleanup_running_deleted_instances_shutdown(self): ctxt, inst1, inst2 = self._test_cleanup_running('shutdown') self.mox.StubOutWithMock(self.compute.driver, 'set_bootable') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.compute.driver.set_bootable(inst1, False) self.compute.driver.power_off(inst1) self.compute.driver.set_bootable(inst2, False) self.compute.driver.power_off(inst2) self.mox.ReplayAll() self.compute._cleanup_running_deleted_instances(ctxt) def test_cleanup_running_deleted_instances_shutdown_notimpl(self): ctxt, inst1, inst2 = self._test_cleanup_running('shutdown') self.mox.StubOutWithMock(self.compute.driver, 'set_bootable') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.compute.driver.set_bootable(inst1, False).AndRaise( NotImplementedError) compute_manager.LOG.warn(mox.IgnoreArg()) self.compute.driver.power_off(inst1) self.compute.driver.set_bootable(inst2, False).AndRaise( NotImplementedError) compute_manager.LOG.warn(mox.IgnoreArg()) self.compute.driver.power_off(inst2) self.mox.ReplayAll() self.compute._cleanup_running_deleted_instances(ctxt) def test_cleanup_running_deleted_instances_shutdown_error(self): ctxt, inst1, inst2 = self._test_cleanup_running('shutdown') self.mox.StubOutWithMock(self.compute.driver, 'set_bootable') self.mox.StubOutWithMock(self.compute.driver, 'power_off') self.mox.StubOutWithMock(compute_manager.LOG, 'exception') e = test.TestingException('bad') self.compute.driver.set_bootable(inst1, False) self.compute.driver.power_off(inst1).AndRaise(e) compute_manager.LOG.warn(mox.IgnoreArg()) self.compute.driver.set_bootable(inst2, False) self.compute.driver.power_off(inst2).AndRaise(e) compute_manager.LOG.warn(mox.IgnoreArg()) self.mox.ReplayAll() self.compute._cleanup_running_deleted_instances(ctxt) def test_running_deleted_instances(self): admin_context = context.get_admin_context() self.compute.host = 'host' instance = self._create_fake_instance_obj() instance.deleted = True now = timeutils.utcnow() instance.deleted_at = now self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver') self.compute._get_instances_on_driver( admin_context, {'deleted': True, 'soft_deleted': False, 'host': self.compute.host}).AndReturn([instance]) self.mox.StubOutWithMock(timeutils, 'is_older_than') timeutils.is_older_than(now, CONF.running_deleted_instance_timeout).AndReturn(True) self.mox.ReplayAll() val = self.compute._running_deleted_instances(admin_context) self.assertEqual(val, [instance]) def _heal_instance_info_cache(self, _get_instance_nw_info_raise=False, _get_instance_nw_info_raise_cache=False): # Update on every call for the test self.flags(heal_instance_info_cache_interval=-1) ctxt = context.get_admin_context() instance_map = {} instances = [] for x in range(8): inst_uuid = getattr(uuids, 'db_instance_%i' % x) instance_map[inst_uuid] = fake_instance.fake_db_instance( uuid=inst_uuid, host=CONF.host, created_at=None) # These 
won't be in our instance since they're not requested instances.append(instance_map[inst_uuid]) call_info = {'get_all_by_host': 0, 'get_by_uuid': 0, 'get_nw_info': 0, 'expected_instance': None} def fake_instance_get_all_by_host(context, host, columns_to_join, use_slave=False): call_info['get_all_by_host'] += 1 self.assertEqual([], columns_to_join) return instances[:] def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join, use_slave=False): if instance_uuid not in instance_map: raise exception.InstanceNotFound(instance_id=instance_uuid) call_info['get_by_uuid'] += 1 self.assertEqual(['system_metadata', 'info_cache', 'extra', 'extra.flavor'], columns_to_join) return instance_map[instance_uuid] # NOTE(comstud): Override the stub in setUp() def fake_get_instance_nw_info(context, instance, use_slave=False): # Note that this exception gets caught in compute/manager # and is ignored. However, the below increment of # 'get_nw_info' won't happen, and you'll get an assert # failure checking it below. self.assertEqual(call_info['expected_instance']['uuid'], instance['uuid']) call_info['get_nw_info'] += 1 if _get_instance_nw_info_raise: raise exception.InstanceNotFound(instance_id=instance['uuid']) if _get_instance_nw_info_raise_cache: raise exception.InstanceInfoCacheNotFound( instance_uuid=instance['uuid']) self.stub_out('nova.db.instance_get_all_by_host', fake_instance_get_all_by_host) self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get_by_uuid) self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake_get_instance_nw_info) # Make an instance appear to be still Building instances[0]['vm_state'] = vm_states.BUILDING # Make an instance appear to be Deleting instances[1]['task_state'] = task_states.DELETING # '0', '1' should be skipped.. call_info['expected_instance'] = instances[2] self.compute._heal_instance_info_cache(ctxt) self.assertEqual(1, call_info['get_all_by_host']) self.assertEqual(0, call_info['get_by_uuid']) self.assertEqual(1, call_info['get_nw_info']) call_info['expected_instance'] = instances[3] self.compute._heal_instance_info_cache(ctxt) self.assertEqual(1, call_info['get_all_by_host']) self.assertEqual(1, call_info['get_by_uuid']) self.assertEqual(2, call_info['get_nw_info']) # Make an instance switch hosts instances[4]['host'] = 'not-me' # Make an instance disappear instance_map.pop(instances[5]['uuid']) # Make an instance switch to be Deleting instances[6]['task_state'] = task_states.DELETING # '4', '5', and '6' should be skipped.. call_info['expected_instance'] = instances[7] self.compute._heal_instance_info_cache(ctxt) self.assertEqual(1, call_info['get_all_by_host']) self.assertEqual(4, call_info['get_by_uuid']) self.assertEqual(3, call_info['get_nw_info']) # Should be no more left. self.assertEqual(0, len(self.compute._instance_uuids_to_heal)) # This should cause a DB query now, so get a list of instances # where none can be processed to make sure we handle that case # cleanly. 
Use just '0' (Building) and '1' (Deleting) instances = instances[0:2] self.compute._heal_instance_info_cache(ctxt) # Should have called the list once more self.assertEqual(2, call_info['get_all_by_host']) # Stays the same because we remove invalid entries from the list self.assertEqual(4, call_info['get_by_uuid']) # Stays the same because we didn't find anything to process self.assertEqual(3, call_info['get_nw_info']) def test_heal_instance_info_cache(self): self._heal_instance_info_cache() def test_heal_instance_info_cache_with_instance_exception(self): self._heal_instance_info_cache(_get_instance_nw_info_raise=True) def test_heal_instance_info_cache_with_info_cache_exception(self): self._heal_instance_info_cache(_get_instance_nw_info_raise_cache=True) @mock.patch('nova.objects.InstanceList.get_by_filters') @mock.patch('nova.compute.api.API.unrescue') def test_poll_rescued_instances(self, unrescue, get): timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5) not_timed_out_time = timeutils.utcnow() instances = [objects.Instance( uuid=uuids.pool_instance_1, vm_state=vm_states.RESCUED, launched_at=timed_out_time), objects.Instance( uuid=uuids.pool_instance_2, vm_state=vm_states.RESCUED, launched_at=timed_out_time), objects.Instance( uuid=uuids.pool_instance_3, vm_state=vm_states.RESCUED, launched_at=not_timed_out_time)] unrescued_instances = {uuids.pool_instance_1: False, uuids.pool_instance_2: False} def fake_instance_get_all_by_filters(context, filters, expected_attrs=None, use_slave=False): self.assertEqual(["system_metadata"], expected_attrs) return instances get.side_effect = fake_instance_get_all_by_filters def fake_unrescue(context, instance): unrescued_instances[instance['uuid']] = True unrescue.side_effect = fake_unrescue self.flags(rescue_timeout=60) ctxt = context.get_admin_context() self.compute._poll_rescued_instances(ctxt) for instance in unrescued_instances.values(): self.assertTrue(instance) @mock.patch('nova.objects.InstanceList.get_by_filters') def test_poll_rebooting_instances(self, get): reboot_timeout = 60 updated_at = timeutils.utcnow() - datetime.timedelta(minutes=5) to_poll = [objects.Instance( uuid=uuids.pool_instance_1, task_state=task_states.REBOOTING, updated_at=updated_at), objects.Instance( uuid=uuids.pool_instance_2, task_state=task_states.REBOOT_STARTED, updated_at=updated_at), objects.Instance( uuid=uuids.pool_instance_3, task_state=task_states.REBOOT_PENDING, updated_at=updated_at)] self.flags(reboot_timeout=reboot_timeout) get.return_value = to_poll ctxt = context.get_admin_context() with (mock.patch.object( self.compute.driver, 'poll_rebooting_instances' )) as mock_poll: self.compute._poll_rebooting_instances(ctxt) mock_poll.assert_called_with(reboot_timeout, to_poll) filters = {'host': 'fake-mini', 'task_state': [ task_states.REBOOTING, task_states.REBOOT_STARTED, task_states.REBOOT_PENDING]} get.assert_called_once_with(ctxt, filters, expected_attrs=[], use_slave=True) def test_poll_unconfirmed_resizes(self): instances = [ fake_instance.fake_db_instance(uuid=uuids.migration_instance_1, vm_state=vm_states.RESIZED, task_state=None), fake_instance.fake_db_instance(uuid=uuids.migration_instance_none), fake_instance.fake_db_instance(uuid=uuids.migration_instance_2, vm_state=vm_states.ERROR, task_state=None), fake_instance.fake_db_instance(uuid=uuids.migration_instance_3, vm_state=vm_states.ACTIVE, task_state= task_states.REBOOTING), fake_instance.fake_db_instance(uuid=uuids.migration_instance_4, vm_state=vm_states.RESIZED, task_state=None), 
fake_instance.fake_db_instance(uuid=uuids.migration_instance_5, vm_state=vm_states.ACTIVE, task_state=None), # The expected migration result will be None instead of error # since _poll_unconfirmed_resizes will not change it # when the instance vm state is RESIZED and task state # is deleting, see bug 1301696 for more detail fake_instance.fake_db_instance(uuid=uuids.migration_instance_6, vm_state=vm_states.RESIZED, task_state='deleting'), fake_instance.fake_db_instance(uuid=uuids.migration_instance_7, vm_state=vm_states.RESIZED, task_state='soft-deleting'), fake_instance.fake_db_instance(uuid=uuids.migration_instance_8, vm_state=vm_states.ACTIVE, task_state='resize_finish')] expected_migration_status = {uuids.migration_instance_1: 'confirmed', uuids.migration_instance_none: 'error', uuids.migration_instance_2: 'error', uuids.migration_instance_3: 'error', uuids.migration_instance_4: None, uuids.migration_instance_5: 'error', uuids.migration_instance_6: None, uuids.migration_instance_7: None, uuids.migration_instance_8: None} migrations = [] for i, instance in enumerate(instances, start=1): fake_mig = test_migration.fake_db_migration() fake_mig.update({'id': i, 'instance_uuid': instance['uuid'], 'status': None}) migrations.append(fake_mig) def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join=None, use_slave=False): self.assertIn('metadata', columns_to_join) self.assertIn('system_metadata', columns_to_join) # raise InstanceNotFound exception for non-existing instance # represented by UUID: uuids.migration_instance_none if instance_uuid == uuids.db_instance_nonexist: raise exception.InstanceNotFound(instance_id=instance_uuid) for instance in instances: if instance['uuid'] == instance_uuid: return instance def fake_migration_get_unconfirmed_by_dest_compute(context, resize_confirm_window, dest_compute, use_slave=False): self.assertEqual(dest_compute, CONF.host) return migrations def fake_migration_update(context, mid, updates): for migration in migrations: if migration['id'] == mid: migration.update(updates) return migration def fake_confirm_resize(context, instance, migration=None): # raise exception for uuids.migration_instance_4 to check # migration status does not get set to 'error' on confirm_resize # failure. if instance['uuid'] == uuids.migration_instance_4: raise test.TestingException('bomb') self.assertIsNotNone(migration) for migration2 in migrations: if (migration2['instance_uuid'] == migration['instance_uuid']): migration2['status'] = 'confirmed' self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get_by_uuid) self.stub_out('nova.db.migration_get_unconfirmed_by_dest_compute', fake_migration_get_unconfirmed_by_dest_compute) self.stub_out('nova.db.migration_update', fake_migration_update) self.stubs.Set(self.compute.compute_api, 'confirm_resize', fake_confirm_resize) def fetch_instance_migration_status(instance_uuid): for migration in migrations: if migration['instance_uuid'] == instance_uuid: return migration['status'] self.flags(resize_confirm_window=60) ctxt = context.get_admin_context() self.compute._poll_unconfirmed_resizes(ctxt) for instance_uuid, status in six.iteritems(expected_migration_status): self.assertEqual(status, fetch_instance_migration_status(instance_uuid)) def test_instance_build_timeout_mixed_instances(self): # Tests that instances which failed to build within the configured # instance_build_timeout value are set to error state.
self.flags(instance_build_timeout=30) ctxt = context.get_admin_context() created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60) filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host} # these are the ones that are expired old_instances = [] for x in range(4): instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at} instance.update(filters) old_instances.append(fake_instance.fake_db_instance(**instance)) # not expired instances = list(old_instances) # copy the contents of old_instances new_instance = { 'uuid': str(uuid.uuid4()), 'created_at': timeutils.utcnow(), } sort_key = 'created_at' sort_dir = 'desc' new_instance.update(filters) instances.append(fake_instance.fake_db_instance(**new_instance)) # creating mocks with test.nested( mock.patch.object(self.compute.db.sqlalchemy.api, 'instance_get_all_by_filters', return_value=instances), mock.patch.object(objects.Instance, 'save'), ) as ( instance_get_all_by_filters, conductor_instance_update ): # run the code self.compute._check_instance_build_time(ctxt) # check our assertions instance_get_all_by_filters.assert_called_once_with( ctxt, filters, sort_key, sort_dir, marker=None, columns_to_join=[], limit=None) self.assertThat(conductor_instance_update.mock_calls, testtools_matchers.HasLength(len(old_instances))) for inst in old_instances: conductor_instance_update.assert_has_calls([ mock.call()]) def test_get_resource_tracker_fail(self): self.assertRaises(exception.NovaException, self.compute._get_resource_tracker, 'invalidnodename') @mock.patch.object(objects.Instance, 'save') def test_instance_update_host_check(self, mock_save): # make sure rt usage doesn't happen if the host or node is different def fail_get(nodename): raise test.TestingException("wrong host/node") self.stubs.Set(self.compute, '_get_resource_tracker', fail_get) instance = self._create_fake_instance_obj({'host': 'someotherhost'}) self.compute._instance_update(self.context, instance, vcpus=4) instance = self._create_fake_instance_obj({'node': 'someothernode'}) self.compute._instance_update(self.context, instance, vcpus=4) params = {'host': 'someotherhost', 'node': 'someothernode'} instance = self._create_fake_instance_obj(params) self.compute._instance_update(self.context, instance, vcpus=4) @mock.patch('nova.objects.MigrationList.get_by_filters') @mock.patch('nova.objects.Migration.save') def test_destroy_evacuated_instance_on_shared_storage(self, mock_save, mock_get): fake_context = context.get_admin_context() # instances in central db instances = [ # those are still related to this host self._create_fake_instance_obj( {'host': self.compute.host}), self._create_fake_instance_obj( {'host': self.compute.host}), self._create_fake_instance_obj( {'host': self.compute.host}) ] # those are already been evacuated to other host evacuated_instance = self._create_fake_instance_obj( {'host': 'otherhost'}) migration = objects.Migration(instance_uuid=evacuated_instance.uuid) mock_get.return_value = [migration] instances.append(evacuated_instance) self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver') self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute, '_is_instance_storage_shared') self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.compute._get_instances_on_driver( fake_context, {'deleted': False}).AndReturn(instances) self.compute.network_api.get_instance_nw_info( fake_context, 
evacuated_instance).AndReturn('fake_network_info') self.compute._get_instance_block_device_info( fake_context, evacuated_instance).AndReturn('fake_bdi') self.compute._is_instance_storage_shared(fake_context, evacuated_instance).AndReturn(True) self.compute.driver.destroy(fake_context, evacuated_instance, 'fake_network_info', 'fake_bdi', False) self.mox.ReplayAll() self.compute._destroy_evacuated_instances(fake_context) mock_get.assert_called_once_with(fake_context, {'source_compute': self.compute.host, 'status': ['accepted', 'done'], 'migration_type': 'evacuation'}) @mock.patch('nova.objects.MigrationList.get_by_filters') @mock.patch('nova.objects.Migration.save') def test_destroy_evacuated_instance_with_disks(self, mock_save, mock_get): fake_context = context.get_admin_context() # instances in central db instances = [ # those are still related to this host self._create_fake_instance_obj( {'host': self.compute.host}), self._create_fake_instance_obj( {'host': self.compute.host}), self._create_fake_instance_obj( {'host': self.compute.host}) ] # those are already been evacuated to other host evacuated_instance = self._create_fake_instance_obj( {'host': 'otherhost'}) migration = objects.Migration(instance_uuid=evacuated_instance.uuid) mock_get.return_value = [migration] instances.append(evacuated_instance) self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver') self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.driver, 'check_instance_shared_storage_local') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'check_instance_shared_storage') self.mox.StubOutWithMock(self.compute.driver, 'check_instance_shared_storage_cleanup') self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.compute._get_instances_on_driver( fake_context, {'deleted': False}).AndReturn(instances) self.compute.network_api.get_instance_nw_info( fake_context, evacuated_instance).AndReturn('fake_network_info') self.compute._get_instance_block_device_info( fake_context, evacuated_instance).AndReturn('fake_bdi') self.compute.driver.check_instance_shared_storage_local(fake_context, evacuated_instance).AndReturn({'filename': 'tmpfilename'}) self.compute.compute_rpcapi.check_instance_shared_storage(fake_context, evacuated_instance, {'filename': 'tmpfilename'}, host=None).AndReturn(False) self.compute.driver.check_instance_shared_storage_cleanup(fake_context, {'filename': 'tmpfilename'}) self.compute.driver.destroy(fake_context, evacuated_instance, 'fake_network_info', 'fake_bdi', True) self.mox.ReplayAll() self.compute._destroy_evacuated_instances(fake_context) @mock.patch('nova.objects.MigrationList.get_by_filters') @mock.patch('nova.objects.Migration.save') def test_destroy_evacuated_instance_not_implemented(self, mock_save, mock_get): fake_context = context.get_admin_context() # instances in central db instances = [ # those are still related to this host self._create_fake_instance_obj( {'host': self.compute.host}), self._create_fake_instance_obj( {'host': self.compute.host}), self._create_fake_instance_obj( {'host': self.compute.host}) ] # those are already been evacuated to other host evacuated_instance = self._create_fake_instance_obj( {'host': 'otherhost'}) migration = objects.Migration(instance_uuid=evacuated_instance.uuid) mock_get.return_value = [migration] instances.append(evacuated_instance) self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver') 
self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.driver, 'check_instance_shared_storage_local') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'check_instance_shared_storage') self.mox.StubOutWithMock(self.compute.driver, 'check_instance_shared_storage_cleanup') self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.compute._get_instances_on_driver( fake_context, {'deleted': False}).AndReturn(instances) self.compute.network_api.get_instance_nw_info( fake_context, evacuated_instance).AndReturn('fake_network_info') self.compute._get_instance_block_device_info( fake_context, evacuated_instance).AndReturn('fake_bdi') self.compute.driver.check_instance_shared_storage_local(fake_context, evacuated_instance).AndRaise(NotImplementedError()) self.compute.driver.destroy(fake_context, evacuated_instance, 'fake_network_info', 'fake_bdi', True) self.mox.ReplayAll() self.compute._destroy_evacuated_instances(fake_context) def test_complete_partial_deletion(self): admin_context = context.get_admin_context() instance = objects.Instance() instance.id = 1 instance.uuid = uuids.instance instance.vm_state = vm_states.DELETED instance.task_state = None instance.system_metadata = {'fake_key': 'fake_value'} instance.vcpus = 1 instance.memory_mb = 1 instance.project_id = 'fake-prj' instance.user_id = 'fake-user' instance.deleted = False def fake_destroy(): instance.deleted = True self.stubs.Set(instance, 'destroy', fake_destroy) self.stub_out('nova.db.block_device_mapping_get_all_by_instance', lambda *a, **k: None) self.stubs.Set(self.compute, '_complete_deletion', lambda *a, **k: None) self.stubs.Set(objects.Quotas, 'reserve', lambda *a, **k: None) self.compute._complete_partial_deletion(admin_context, instance) self.assertNotEqual(0, instance.deleted) def test_terminate_instance_updates_tracker(self): rt = self.compute._get_resource_tracker(NODENAME) admin_context = context.get_admin_context() self.assertEqual(0, rt.compute_node.vcpus_used) instance = self._create_fake_instance_obj() instance.vcpus = 1 rt.instance_claim(admin_context, instance) self.assertEqual(1, rt.compute_node.vcpus_used) self.compute.terminate_instance(admin_context, instance, [], []) self.assertEqual(0, rt.compute_node.vcpus_used) @mock.patch('nova.compute.manager.ComputeManager' '._notify_about_instance_usage') @mock.patch('nova.objects.Quotas.reserve') # NOTE(cdent): At least in this test destroy() on the instance sets it # state back to active, meaning the resource tracker won't # update properly. 
@mock.patch('nova.objects.Instance.destroy') def test_init_deleted_instance_updates_tracker(self, noop1, noop2, noop3): rt = self.compute._get_resource_tracker(NODENAME) admin_context = context.get_admin_context() self.assertEqual(0, rt.compute_node.vcpus_used) instance = self._create_fake_instance_obj() instance.vcpus = 1 self.assertEqual(0, rt.compute_node.vcpus_used) rt.instance_claim(admin_context, instance) self.compute._init_instance(admin_context, instance) self.assertEqual(1, rt.compute_node.vcpus_used) instance.vm_state = vm_states.DELETED self.compute._init_instance(admin_context, instance) self.assertEqual(0, rt.compute_node.vcpus_used) def test_init_instance_for_partial_deletion(self): admin_context = context.get_admin_context() instance = objects.Instance(admin_context) instance.id = 1 instance.vm_state = vm_states.DELETED instance.deleted = False instance.host = self.compute.host def fake_partial_deletion(context, instance): instance['deleted'] = instance['id'] self.stubs.Set(self.compute, '_complete_partial_deletion', fake_partial_deletion) self.compute._init_instance(admin_context, instance) self.assertNotEqual(0, instance['deleted']) def test_partial_deletion_raise_exception(self): admin_context = context.get_admin_context() instance = objects.Instance(admin_context) instance.uuid = str(uuid.uuid4()) instance.vm_state = vm_states.DELETED instance.deleted = False instance.host = self.compute.host self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion') self.compute._complete_partial_deletion( admin_context, instance).AndRaise(ValueError) self.mox.ReplayAll() self.compute._init_instance(admin_context, instance) def test_add_remove_fixed_ip_updates_instance_updated_at(self): def _noop(*args, **kwargs): pass self.stubs.Set(self.compute.network_api, 'add_fixed_ip_to_instance', _noop) self.stubs.Set(self.compute.network_api, 'remove_fixed_ip_from_instance', _noop) instance = self._create_fake_instance_obj() updated_at_1 = instance['updated_at'] self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance) updated_at_2 = db.instance_get_by_uuid(self.context, instance['uuid'])['updated_at'] self.compute.remove_fixed_ip_from_instance(self.context, 'fake', instance) updated_at_3 = db.instance_get_by_uuid(self.context, instance['uuid'])['updated_at'] updated_ats = (updated_at_1, updated_at_2, updated_at_3) self.assertEqual(len(updated_ats), len(set(updated_ats))) def test_no_pending_deletes_for_soft_deleted_instances(self): self.flags(reclaim_instance_interval=0) ctxt = context.get_admin_context() instance = self._create_fake_instance_obj( params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': timeutils.utcnow()}) self.compute._run_pending_deletes(ctxt) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertFalse(instance['cleaned']) def test_reclaim_queued_deletes(self): self.flags(reclaim_instance_interval=3600) ctxt = context.get_admin_context() # Active self._create_fake_instance_obj(params={'host': CONF.host}) # Deleted not old enough self._create_fake_instance_obj(params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': timeutils.utcnow()}) # Deleted old enough (only this one should be reclaimed) deleted_at = (timeutils.utcnow() - datetime.timedelta(hours=1, minutes=5)) self._create_fake_instance_obj( params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': deleted_at}) # Restoring # NOTE(hanlind): This specifically tests for a race condition # where restoring a previously 
soft deleted instance sets # deleted_at back to None, causing reclaim to think it can be # deleted, see LP #1186243. self._create_fake_instance_obj( params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'task_state': task_states.RESTORING}) self.mox.StubOutWithMock(self.compute, '_delete_instance') self.compute._delete_instance( ctxt, mox.IsA(objects.Instance), [], mox.IsA(objects.Quotas)) self.mox.ReplayAll() self.compute._reclaim_queued_deletes(ctxt) def test_reclaim_queued_deletes_continue_on_error(self): # Verify that reclaim continues on error. self.flags(reclaim_instance_interval=3600) ctxt = context.get_admin_context() deleted_at = (timeutils.utcnow() - datetime.timedelta(hours=1, minutes=5)) instance1 = self._create_fake_instance_obj( params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': deleted_at}) instance2 = self._create_fake_instance_obj( params={'host': CONF.host, 'vm_state': vm_states.SOFT_DELETED, 'deleted_at': deleted_at}) instances = [] instances.append(instance1) instances.append(instance2) self.mox.StubOutWithMock(objects.InstanceList, 'get_by_filters') self.mox.StubOutWithMock(self.compute, '_deleted_old_enough') self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(self.compute, '_delete_instance') objects.InstanceList.get_by_filters( ctxt, mox.IgnoreArg(), expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, use_slave=True ).AndReturn(instances) # The first instance delete fails. self.compute._deleted_old_enough(instance1, 3600).AndReturn(True) objects.BlockDeviceMappingList.get_by_instance_uuid( ctxt, instance1.uuid).AndReturn([]) self.compute._delete_instance(ctxt, instance1, [], self.none_quotas).AndRaise( test.TestingException) # The second instance delete that follows. self.compute._deleted_old_enough(instance2, 3600).AndReturn(True) objects.BlockDeviceMappingList.get_by_instance_uuid( ctxt, instance2.uuid).AndReturn([]) self.compute._delete_instance(ctxt, instance2, [], self.none_quotas) self.mox.ReplayAll() self.compute._reclaim_queued_deletes(ctxt) def test_sync_power_states(self): ctxt = self.context.elevated() self._create_fake_instance_obj({'host': self.compute.host}) self._create_fake_instance_obj({'host': self.compute.host}) self._create_fake_instance_obj({'host': self.compute.host}) self.mox.StubOutWithMock(self.compute.driver, 'get_info') self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') # Check to make sure task continues on error. 
self.compute.driver.get_info(mox.IgnoreArg()).AndRaise( exception.InstanceNotFound(instance_id=uuids.instance)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.NOSTATE).AndRaise( exception.InstanceNotFound(instance_id=uuids.instance)) self.compute.driver.get_info(mox.IgnoreArg()).AndReturn( hardware.InstanceInfo(state=power_state.RUNNING)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.RUNNING, use_slave=True) self.compute.driver.get_info(mox.IgnoreArg()).AndReturn( hardware.InstanceInfo(state=power_state.SHUTDOWN)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.SHUTDOWN, use_slave=True) self.mox.ReplayAll() self.compute._sync_power_states(ctxt) def _test_lifecycle_event(self, lifecycle_event, vm_power_state, is_actual_state=True): instance = self._create_fake_instance_obj() uuid = instance['uuid'] self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') self.mox.StubOutWithMock(self.compute, '_get_power_state') actual_state = (vm_power_state if vm_power_state is not None and is_actual_state else power_state.NOSTATE) self.compute._get_power_state( mox.IgnoreArg(), mox.ContainsKeyValue('uuid', uuid)).AndReturn(actual_state) if actual_state == vm_power_state: self.compute._sync_instance_power_state( mox.IgnoreArg(), mox.ContainsKeyValue('uuid', uuid), vm_power_state) self.mox.ReplayAll() self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event)) self.mox.VerifyAll() self.mox.UnsetStubs() def test_lifecycle_events(self): self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED, power_state.SHUTDOWN) self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED, power_state.SHUTDOWN, is_actual_state=False) self._test_lifecycle_event(event.EVENT_LIFECYCLE_STARTED, power_state.RUNNING) self._test_lifecycle_event(event.EVENT_LIFECYCLE_PAUSED, power_state.PAUSED) self._test_lifecycle_event(event.EVENT_LIFECYCLE_RESUMED, power_state.RUNNING) self._test_lifecycle_event(-1, None) def test_lifecycle_event_non_existent_instance(self): # No error raised for non-existent instance because of inherent race # between database updates and hypervisor events. See bug #1180501. 
event_instance = event.LifecycleEvent('does-not-exist', event.EVENT_LIFECYCLE_STOPPED) self.compute.handle_events(event_instance) @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(objects.Quotas, 'rollback') def test_confirm_resize_roll_back_quota_migration_not_found(self, mock_rollback, mock_get_by_id): instance = self._create_fake_instance_obj() migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'finished' migration.id = 0 mock_get_by_id.side_effect = exception.MigrationNotFound( migration_id=0) self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) self.assertTrue(mock_rollback.called) @mock.patch.object(instance_obj.Instance, 'get_by_uuid') @mock.patch.object(objects.Quotas, 'rollback') def test_confirm_resize_roll_back_quota_instance_not_found(self, mock_rollback, mock_get_by_id): instance = self._create_fake_instance_obj() migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'finished' migration.id = 0 mock_get_by_id.side_effect = exception.InstanceNotFound( instance_id=instance.uuid) self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) self.assertTrue(mock_rollback.called) @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(objects.Quotas, 'rollback') def test_confirm_resize_roll_back_quota_status_confirmed(self, mock_rollback, mock_get_by_id): instance = self._create_fake_instance_obj() migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'confirmed' migration.id = 0 mock_get_by_id.return_value = migration self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) self.assertTrue(mock_rollback.called) @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(objects.Quotas, 'rollback') def test_confirm_resize_roll_back_quota_status_dummy(self, mock_rollback, mock_get_by_id): instance = self._create_fake_instance_obj() migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'dummy' migration.id = 0 mock_get_by_id.return_value = migration self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) self.assertTrue(mock_rollback.called) def test_allow_confirm_resize_on_instance_in_deleting_task_state(self): instance = self._create_fake_instance_obj() old_type = instance.flavor new_type = flavors.get_flavor_by_flavor_id('4') instance.flavor = new_type instance.old_flavor = old_type instance.new_flavor = new_type fake_rt = self.mox.CreateMockAnything() def fake_drop_move_claim(*args, **kwargs): pass def fake_get_resource_tracker(self): return fake_rt def fake_setup_networks_on_host(self, *args, **kwargs): pass self.stubs.Set(fake_rt, 'drop_move_claim', fake_drop_move_claim) self.stubs.Set(self.compute, '_get_resource_tracker', fake_get_resource_tracker) self.stubs.Set(self.compute.network_api, 'setup_networks_on_host', fake_setup_networks_on_host) migration = objects.Migration(context=self.context.elevated()) migration.instance_uuid = instance.uuid migration.status = 'finished' migration.migration_type = 'resize' migration.create() instance.task_state = task_states.DELETING instance.vm_state = vm_states.RESIZED instance.system_metadata = {} instance.save() self.compute.confirm_resize(self.context, instance=instance, migration=migration, reservations=[]) instance.refresh() self.assertEqual(vm_states.ACTIVE, 
instance['vm_state']) def _get_instance_and_bdm_for_dev_defaults_tests(self): instance = self._create_fake_instance_obj( params={'root_device_name': '/dev/vda'}) block_device_mapping = block_device_obj.block_device_make_list( self.context, [fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'instance_uuid': uuids.block_device_instance, 'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'boot_index': 0})]) return instance, block_device_mapping def test_default_block_device_names_empty_instance_root_dev(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() instance.root_device_name = None self.mox.StubOutWithMock(objects.Instance, 'save') self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], [bdm for bdm in bdms]) self.mox.ReplayAll() self.compute._default_block_device_names(self.context, instance, {}, bdms) self.assertEqual('/dev/vda', instance.root_device_name) def test_default_block_device_names_empty_root_device(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() bdms[0]['device_name'] = None self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save') bdms[0].save().AndReturn(None) self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], [bdm for bdm in bdms]) self.mox.ReplayAll() self.compute._default_block_device_names(self.context, instance, {}, bdms) def test_default_block_device_names_no_root_device(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() instance.root_device_name = None bdms[0]['device_name'] = None self.mox.StubOutWithMock(objects.Instance, 'save') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save') self.mox.StubOutWithMock(self.compute, '_default_root_device_name') self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') self.compute._default_root_device_name(instance, mox.IgnoreArg(), bdms[0]).AndReturn('/dev/vda') bdms[0].save().AndReturn(None) self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], [bdm for bdm in bdms]) self.mox.ReplayAll() self.compute._default_block_device_names(self.context, instance, {}, bdms) self.assertEqual('/dev/vda', instance.root_device_name) def test_default_block_device_names_with_blank_volumes(self): instance = self._create_fake_instance_obj() image_meta = {} root_volume = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 1, 'instance_uuid': uuids.block_device_instance, 'source_type': 'volume', 'destination_type': 'volume', 'image_id': 'fake-image-id-1', 'boot_index': 0})) blank_volume1 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 2, 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'boot_index': -1})) blank_volume2 = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 3, 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'volume', 'boot_index': -1})) ephemeral = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 4, 'instance_uuid': uuids.block_device_instance, 'source_type': 'blank', 'destination_type': 'local'})) swap = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ 'id': 5, 'instance_uuid': uuids.block_device_instance, 
'source_type': 'blank', 'destination_type': 'local', 'guest_format': 'swap' })) bdms = block_device_obj.block_device_make_list( self.context, [root_volume, blank_volume1, blank_volume2, ephemeral, swap]) with test.nested( mock.patch.object(self.compute, '_default_root_device_name', return_value='/dev/vda'), mock.patch.object(objects.BlockDeviceMapping, 'save'), mock.patch.object(self.compute, '_default_device_names_for_instance') ) as (default_root_device, object_save, default_device_names): self.compute._default_block_device_names(self.context, instance, image_meta, bdms) default_root_device.assert_called_once_with(instance, image_meta, bdms[0]) self.assertEqual('/dev/vda', instance.root_device_name) self.assertTrue(object_save.called) default_device_names.assert_called_once_with(instance, '/dev/vda', [bdms[-2]], [bdms[-1]], [bdm for bdm in bdms[:-2]]) def test_reserve_block_device_name(self): instance = self._create_fake_instance_obj( params={'root_device_name': '/dev/vda'}) bdm = objects.BlockDeviceMapping( **{'context': self.context, 'source_type': 'image', 'destination_type': 'local', 'image_id': uuids.image_instance, 'device_name': '/dev/vda', 'instance_uuid': instance.uuid}) bdm.create() self.compute.reserve_block_device_name(self.context, instance, '/dev/vdb', uuids.block_device_instance, 'virtio', 'disk') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance.uuid) bdms = list(bdms) self.assertEqual(len(bdms), 2) bdms.sort(key=operator.attrgetter('device_name')) vol_bdm = bdms[1] self.assertEqual(vol_bdm.source_type, 'volume') self.assertIsNone(vol_bdm.boot_index) self.assertIsNone(vol_bdm.guest_format) self.assertEqual(vol_bdm.destination_type, 'volume') self.assertEqual(vol_bdm.device_name, '/dev/vdb') self.assertEqual(vol_bdm.volume_id, uuids.block_device_instance) self.assertEqual(vol_bdm.disk_bus, 'virtio') self.assertEqual(vol_bdm.device_type, 'disk') def test_reserve_block_device_name_with_iso_instance(self): instance = self._create_fake_instance_obj( params={'root_device_name': '/dev/hda'}) bdm = objects.BlockDeviceMapping( context=self.context, **{'source_type': 'image', 'destination_type': 'local', 'image_id': 'fake-image-id', 'device_name': '/dev/hda', 'instance_uuid': instance.uuid}) bdm.create() self.compute.reserve_block_device_name(self.context, instance, '/dev/vdb', uuids.block_device_instance, 'ide', 'disk') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance.uuid) bdms = list(bdms) self.assertEqual(2, len(bdms)) bdms.sort(key=operator.attrgetter('device_name')) vol_bdm = bdms[1] self.assertEqual('volume', vol_bdm.source_type) self.assertEqual('volume', vol_bdm.destination_type) self.assertEqual('/dev/hdb', vol_bdm.device_name) self.assertEqual(uuids.block_device_instance, vol_bdm.volume_id) self.assertEqual('ide', vol_bdm.disk_bus) self.assertEqual('disk', vol_bdm.device_type) @mock.patch.object(cinder.API, 'get_snapshot') def test_quiesce(self, mock_snapshot_get): # ensure instance can be quiesced and unquiesced instance = self._create_fake_instance_obj() mapping = [{'source_type': 'snapshot', 'snapshot_id': 'fake-id1'}, {'source_type': 'snapshot', 'snapshot_id': 'fake-id2'}] # unquiesce should wait until volume snapshots are completed mock_snapshot_get.side_effect = [{'status': 'creating'}, {'status': 'available'}] * 2 self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.quiesce_instance(self.context, instance) 
self.compute.unquiesce_instance(self.context, instance, mapping) self.compute.terminate_instance(self.context, instance, [], []) mock_snapshot_get.assert_any_call(mock.ANY, 'fake-id1') mock_snapshot_get.assert_any_call(mock.ANY, 'fake-id2') self.assertEqual(4, mock_snapshot_get.call_count) def test_instance_fault_message_no_rescheduled_details_without_retry(self): """This test simulates a spawn failure with no retry data. If driver spawn raises an exception and there is no retry data available, the instance fault message should not contain any details about rescheduling. The fault message field is limited in size and a long message about rescheduling displaces the original error message. """ class TestException(Exception): pass instance = self._create_fake_instance_obj() with mock.patch.object(self.compute.driver, 'spawn') as mock_spawn: mock_spawn.side_effect = TestException('Preserve this') self.compute.build_and_run_instance( self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertEqual('Preserve this', instance.fault.message) class ComputeAPITestCase(BaseTestCase): def setUp(self): def fake_get_nw_info(cls, ctxt, instance): self.assertTrue(ctxt.is_admin) return fake_network.fake_get_instance_nw_info(self, 1, 1) super(ComputeAPITestCase, self).setUp() self.useFixture(fixtures.SpawnIsSynchronousFixture()) self.stubs.Set(network_api.API, 'get_instance_nw_info', fake_get_nw_info) self.security_group_api = ( openstack_driver.get_openstack_security_group_driver()) self.compute_api = compute.API( security_group_api=self.security_group_api) self.fake_image = { 'id': 'f9000000-0000-0000-0000-000000000000', 'name': 'fake_name', 'status': 'active', 'properties': {'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id'}, } def fake_show(obj, context, image_id, **kwargs): if image_id: return self.fake_image else: raise exception.ImageNotFound(image_id=image_id) self.fake_show = fake_show # Mock out build_instances and rebuild_instance since nothing in these # tests should need those to actually run. We do this to avoid # possible races with other tests that actually test those methods # and mock things out within them, like conductor tests. self.build_instances_mock = mock.Mock(autospec=True) self.compute_api.compute_task_api.build_instances = \ self.build_instances_mock self.rebuild_instance_mock = mock.Mock(autospec=True) self.compute_api.compute_task_api.rebuild_instance = \ self.rebuild_instance_mock def _run_instance(self, params=None): instance = self._create_fake_instance_obj(params, services=True) instance_uuid = instance['uuid'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance.refresh() self.assertIsNone(instance['task_state']) return instance, instance_uuid def test_create_with_too_little_ram(self): # Test an instance type with too little memory. inst_type = flavors.get_default_flavor() inst_type['memory_mb'] = 1 self.fake_image['min_ram'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorMemoryTooSmall, self.compute_api.create, self.context, inst_type, self.fake_image['id']) # Now increase the inst_type memory and make sure all is fine. inst_type['memory_mb'] = 2 (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) def test_create_with_too_little_disk(self): # Test an instance type with too little disk space. 
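# The image advertises min_disk=2 while the flavor only provides root_gb=1, so create() should raise FlavorDiskSmallerThanMinDisk until the flavor disk is bumped to 2.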
inst_type = flavors.get_default_flavor() inst_type['root_gb'] = 1 self.fake_image['min_disk'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorDiskSmallerThanMinDisk, self.compute_api.create, self.context, inst_type, self.fake_image['id']) # Now increase the inst_type disk space and make sure all is fine. inst_type['root_gb'] = 2 (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) def test_create_with_too_large_image(self): # Test an instance type with an image that is too large. inst_type = flavors.get_default_flavor() inst_type['root_gb'] = 1 self.fake_image['size'] = '1073741825' self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorDiskSmallerThanImage, self.compute_api.create, self.context, inst_type, self.fake_image['id']) # Reduce image to 1 GB limit and ensure it works self.fake_image['size'] = '1073741824' (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) def test_create_just_enough_ram_and_disk(self): # Test an instance type with just enough ram and disk space. inst_type = flavors.get_default_flavor() inst_type['root_gb'] = 2 inst_type['memory_mb'] = 2 self.fake_image['min_ram'] = 2 self.fake_image['min_disk'] = 2 self.fake_image['name'] = 'fake_name' self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) def test_create_with_no_ram_and_disk_reqs(self): # Test an instance type with no min_ram or min_disk. inst_type = flavors.get_default_flavor() inst_type['root_gb'] = 1 inst_type['memory_mb'] = 1 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) def test_create_bdm_from_flavor(self): instance_type_params = { 'flavorid': 'test', 'name': 'test', 'swap': 1024, 'ephemeral_gb': 1, 'root_gb': 1, } self._create_instance_type(params=instance_type_params) inst_type = flavors.get_flavor_by_name('test') self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) (refs, resv_id) = self.compute_api.create(self.context, inst_type, self.fake_image['id']) instance_uuid = refs[0]['uuid'] bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance_uuid) ephemeral = list(filter(block_device.new_format_is_ephemeral, bdms)) self.assertEqual(1, len(ephemeral)) swap = list(filter(block_device.new_format_is_swap, bdms)) self.assertEqual(1, len(swap)) self.assertEqual(1024, swap[0].volume_size) self.assertEqual(1, ephemeral[0].volume_size) def test_create_with_deleted_image(self): # If we're given a deleted image by glance, we should not be able to # build from it inst_type = flavors.get_default_flavor() self.fake_image['name'] = 'fake_name' self.fake_image['status'] = 'DELETED' self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) expected_message = ( exception.ImageNotActive.msg_fmt % {'image_id': self.fake_image['id']}) with testtools.ExpectedException(exception.ImageNotActive, expected_message): self.compute_api.create(self.context, inst_type, self.fake_image['id']) @mock.patch('nova.virt.hardware.numa_get_constraints') def test_create_with_numa_topology(self, numa_constraints_mock): inst_type = flavors.get_default_flavor() numa_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=512),
objects.InstanceNUMACell( id=1, cpuset=set([3, 4]), memory=512)]) numa_constraints_mock.return_value = numa_topology instances, resv_id = self.compute_api.create(self.context, inst_type, self.fake_image['id']) numa_constraints_mock.assert_called_once_with( inst_type, test.MatchType(objects.ImageMeta)) self.assertEqual( numa_topology.cells[0].obj_to_primitive(), instances[0].numa_topology.cells[0].obj_to_primitive()) self.assertEqual( numa_topology.cells[1].obj_to_primitive(), instances[0].numa_topology.cells[1].obj_to_primitive()) def test_create_instance_defaults_display_name(self): # Verify that an instance created without a display_name gets a # default one assigned. cases = [dict(), dict(display_name=None)] for instance in cases: (ref, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), 'f5000000-0000-0000-0000-000000000000', **instance) self.assertIsNotNone(ref[0]['display_name']) def test_create_instance_sets_system_metadata(self): # Make sure image properties are copied into system metadata. (ref, resv_id) = self.compute_api.create( self.context, instance_type=flavors.get_default_flavor(), image_href='f5000000-0000-0000-0000-000000000000') sys_metadata = db.instance_system_metadata_get(self.context, ref[0]['uuid']) image_props = {'image_kernel_id': 'fake_kernel_id', 'image_ramdisk_id': 'fake_ramdisk_id', 'image_something_else': 'meow', } for key, value in six.iteritems(image_props): self.assertIn(key, sys_metadata) self.assertEqual(value, sys_metadata[key]) def test_create_saves_flavor(self): instance_type = flavors.get_default_flavor() (ref, resv_id) = self.compute_api.create( self.context, instance_type=instance_type, image_href=uuids.image_href_id) instance = objects.Instance.get_by_uuid(self.context, ref[0]['uuid']) self.assertEqual(instance_type.flavorid, instance.flavor.flavorid) self.assertNotIn('instance_type_id', instance.system_metadata) def test_create_instance_associates_security_groups(self): # Make sure create associates security groups. group = self._create_group() (ref, resv_id) = self.compute_api.create( self.context, instance_type=flavors.get_default_flavor(), image_href=uuids.image_href_id, security_group=['testgroup']) groups_for_instance = db.security_group_get_by_instance( self.context, ref[0]['uuid']) self.assertEqual(1, len(groups_for_instance)) self.assertEqual(group.id, groups_for_instance[0].id) group_with_instances = db.security_group_get(self.context, group.id, columns_to_join=['instances']) self.assertEqual(1, len(group_with_instances.instances)) def test_create_instance_with_invalid_security_group_raises(self): instance_type = flavors.get_default_flavor() pre_build_len = len(db.instance_get_all(self.context)) self.assertRaises(exception.SecurityGroupNotFoundForProject, self.compute_api.create, self.context, instance_type=instance_type, image_href=None, security_group=['this_is_a_fake_sec_group']) self.assertEqual(pre_build_len, len(db.instance_get_all(self.context))) def test_create_with_large_user_data(self): # Test an instance type with too much user data. inst_type = flavors.get_default_flavor() self.fake_image['min_ram'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.InstanceUserDataTooLarge, self.compute_api.create, self.context, inst_type, self.fake_image['id'], user_data=(b'1' * 65536)) def test_create_with_malformed_user_data(self): # Test an instance type with malformed user data.
inst_type = flavors.get_default_flavor() self.fake_image['min_ram'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.InstanceUserDataMalformed, self.compute_api.create, self.context, inst_type, self.fake_image['id'], user_data=b'banana') def test_create_with_base64_user_data(self): # Test an instance type with an acceptable amount of user data. inst_type = flavors.get_default_flavor() self.fake_image['min_ram'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) # NOTE(mikal): a string of length 48510 encodes to 65532 characters of # base64 (refs, resv_id) = self.compute_api.create( self.context, inst_type, self.fake_image['id'], user_data=base64.encodestring(b'1' * 48510)) def test_populate_instance_for_create(self): base_options = {'image_ref': self.fake_image['id'], 'system_metadata': {'fake': 'value'}, 'uuid': uuids.instance} instance = objects.Instance() instance.update(base_options) inst_type = flavors.get_flavor_by_name("m1.tiny") instance = self.compute_api._populate_instance_for_create( self.context, instance, self.fake_image, 1, security_groups=objects.SecurityGroupList(), instance_type=inst_type) self.assertEqual(str(base_options['image_ref']), instance['system_metadata']['image_base_image_ref']) self.assertEqual(vm_states.BUILDING, instance['vm_state']) self.assertEqual(task_states.SCHEDULING, instance['task_state']) self.assertEqual(1, instance['launch_index']) self.assertIsNotNone(instance.get('uuid')) self.assertEqual([], instance.security_groups.objects) def test_default_hostname_generator(self): fake_uuids = [str(uuid.uuid4()) for x in range(4)] orig_populate = self.compute_api._populate_instance_for_create def _fake_populate(context, base_options, *args, **kwargs): base_options['uuid'] = fake_uuids.pop(0) return orig_populate(context, base_options, *args, **kwargs) self.stubs.Set(self.compute_api, '_populate_instance_for_create', _fake_populate) cases = [(None, 'server-%s' % fake_uuids[0]), ('Hello, Server!', 'hello-server'), ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'), ('hello_server', 'hello-server')] for display_name, hostname in cases: (ref, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, display_name=display_name) self.assertEqual(ref[0]['hostname'], hostname) def test_instance_create_adds_to_instance_group(self): self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) group = objects.InstanceGroup(self.context) group.uuid = str(uuid.uuid4()) group.project_id = self.context.project_id group.user_id = self.context.user_id group.create() inst_type = flavors.get_default_flavor() (refs, resv_id) = self.compute_api.create( self.context, inst_type, self.fake_image['id'], scheduler_hints={'group': group.uuid}) group = objects.InstanceGroup.get_by_uuid(self.context, group.uuid) self.assertIn(refs[0]['uuid'], group.members) def test_instance_create_with_group_name_fails(self): self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) inst_type = flavors.get_default_flavor() self.assertRaises( exception.InvalidInput, self.compute_api.create, self.context, inst_type, self.fake_image['id'], scheduler_hints={'group': 'non-uuid'}) def test_instance_create_with_group_uuid_fails_group_not_exist(self): self.stub_out('nova.tests.unit.image.fake._FakeImageService.show', self.fake_show) inst_type = flavors.get_default_flavor() self.assertRaises( exception.InstanceGroupNotFound, self.compute_api.create, self.context, inst_type,
self.fake_image['id'], scheduler_hints={'group': '5b674f73-c8cf-40ef-9965-3b6fe4b304b1'}) def test_destroy_instance_disassociates_security_groups(self): # Make sure destroying disassociates security groups. group = self._create_group() (ref, resv_id) = self.compute_api.create( self.context, instance_type=flavors.get_default_flavor(), image_href=uuids.image_href_id, security_group=['testgroup']) db.instance_destroy(self.context, ref[0]['uuid']) group = db.security_group_get(self.context, group['id'], columns_to_join=['instances']) self.assertEqual(0, len(group['instances'])) def test_destroy_security_group_disassociates_instances(self): # Make sure destroying security groups disassociates instances. group = self._create_group() (ref, resv_id) = self.compute_api.create( self.context, instance_type=flavors.get_default_flavor(), image_href=uuids.image_href_id, security_group=['testgroup']) db.security_group_destroy(self.context, group['id']) admin_deleted_context = context.get_admin_context( read_deleted="only") group = db.security_group_get(admin_deleted_context, group['id'], columns_to_join=['instances']) self.assertEqual(0, len(group['instances'])) def _test_rebuild(self, vm_state): instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance = objects.Instance.get_by_uuid(self.context, instance_uuid) self.assertIsNone(instance.task_state) # Set some image metadata that should get wiped out and reset # as well as some other metadata that should be preserved. instance.system_metadata.update({ 'image_kernel_id': 'old-data', 'image_ramdisk_id': 'old_data', 'image_something_else': 'old-data', 'image_should_remove': 'bye-bye', 'preserved': 'preserve this!'}) instance.save() # Make sure Compute API updates the image_ref before casting to # compute manager. 
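# fake_rpc_rebuild below captures what would be cast over RPC: 'image_ref' records the image the API set on the instance, and 'clean' checks that the transient 'progress' field has not been left dirty on the instance object.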
info = {'image_ref': None, 'clean': False} def fake_rpc_rebuild(context, **kwargs): info['image_ref'] = kwargs['instance'].image_ref info['clean'] = ('progress' not in kwargs['instance'].obj_what_changed()) self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance', fake_rpc_rebuild) image_ref = instance["image_ref"] + '-new_image_ref' password = "new_password" instance.vm_state = vm_state instance.save() self.compute_api.rebuild(self.context, instance, image_ref, password) self.assertEqual(info['image_ref'], image_ref) self.assertTrue(info['clean']) instance.refresh() self.assertEqual(instance.task_state, task_states.REBUILDING) sys_meta = {k: v for k, v in instance.system_metadata.items() if not k.startswith('instance_type')} self.assertEqual(sys_meta, {'image_kernel_id': 'fake_kernel_id', 'image_min_disk': '1', 'image_ramdisk_id': 'fake_ramdisk_id', 'image_something_else': 'meow', 'preserved': 'preserve this!'}) def test_rebuild(self): self._test_rebuild(vm_state=vm_states.ACTIVE) def test_rebuild_in_error_state(self): self._test_rebuild(vm_state=vm_states.ERROR) def test_rebuild_in_error_not_launched(self): instance = self._create_fake_instance_obj(params={'image_ref': ''}) self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"vm_state": vm_states.ERROR, "launched_at": None}) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertRaises(exception.InstanceInvalidState, self.compute_api.rebuild, self.context, instance, instance['image_ref'], "new password") def test_rebuild_no_image(self): instance = self._create_fake_instance_obj(params={'image_ref': ''}) instance_uuid = instance.uuid self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.compute_api.rebuild(self.context, instance, '', 'new_password') instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['task_state'], task_states.REBUILDING) def test_rebuild_with_deleted_image(self): # If we're given a deleted image by glance, we should not be able to # rebuild from it instance = self._create_fake_instance_obj(params={'image_ref': '1'}) self.fake_image['name'] = 'fake_name' self.fake_image['status'] = 'DELETED' self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) expected_message = ( exception.ImageNotActive.msg_fmt % {'image_id': self.fake_image['id']}) with testtools.ExpectedException(exception.ImageNotActive, expected_message): self.compute_api.rebuild(self.context, instance, self.fake_image['id'], 'new_password') def test_rebuild_with_too_little_ram(self): instance = self._create_fake_instance_obj(params={'image_ref': '1'}) instance.flavor.memory_mb = 64 instance.flavor.root_gb = 1 self.fake_image['min_ram'] = 128 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorMemoryTooSmall, self.compute_api.rebuild, self.context, instance, self.fake_image['id'], 'new_password') # Reduce image memory requirements and make sure it works self.fake_image['min_ram'] = 64 self.compute_api.rebuild(self.context, instance, self.fake_image['id'], 'new_password') def test_rebuild_with_too_little_disk(self): instance = self._create_fake_instance_obj(params={'image_ref': '1'}) def fake_extract_flavor(_inst, prefix=''): if prefix == '': f = objects.Flavor(**test_flavor.fake_flavor) f.memory_mb = 64 f.root_gb = 1 return f else: raise 
KeyError() self.stubs.Set(flavors, 'extract_flavor', fake_extract_flavor) self.fake_image['min_disk'] = 2 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorDiskSmallerThanMinDisk, self.compute_api.rebuild, self.context, instance, self.fake_image['id'], 'new_password') # Reduce image disk requirements and make sure it works self.fake_image['min_disk'] = 1 self.compute_api.rebuild(self.context, instance, self.fake_image['id'], 'new_password') def test_rebuild_with_just_enough_ram_and_disk(self): instance = self._create_fake_instance_obj(params={'image_ref': '1'}) def fake_extract_flavor(_inst, prefix=''): if prefix == '': f = objects.Flavor(**test_flavor.fake_flavor) f.memory_mb = 64 f.root_gb = 1 return f else: raise KeyError() self.stubs.Set(flavors, 'extract_flavor', fake_extract_flavor) self.fake_image['min_ram'] = 64 self.fake_image['min_disk'] = 1 self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.compute_api.rebuild(self.context, instance, self.fake_image['id'], 'new_password') def test_rebuild_with_no_ram_and_disk_reqs(self): instance = self._create_fake_instance_obj(params={'image_ref': '1'}) def fake_extract_flavor(_inst, prefix=''): if prefix == '': f = objects.Flavor(**test_flavor.fake_flavor) f.memory_mb = 64 f.root_gb = 1 return f else: raise KeyError() self.stubs.Set(flavors, 'extract_flavor', fake_extract_flavor) self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.compute_api.rebuild(self.context, instance, self.fake_image['id'], 'new_password') def test_rebuild_with_too_large_image(self): instance = self._create_fake_instance_obj(params={'image_ref': '1'}) def fake_extract_flavor(_inst, prefix=''): if prefix == '': f = objects.Flavor(**test_flavor.fake_flavor) f.memory_mb = 64 f.root_gb = 1 return f else: raise KeyError() self.stubs.Set(flavors, 'extract_flavor', fake_extract_flavor) self.fake_image['size'] = '1073741825' self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) self.assertRaises(exception.FlavorDiskSmallerThanImage, self.compute_api.rebuild, self.context, instance, self.fake_image['id'], 'new_password') # Reduce image to 1 GB limit and ensure it works self.fake_image['size'] = '1073741824' self.compute_api.rebuild(self.context, instance, self.fake_image['id'], 'new_password') def test_hostname_create(self): # Ensure instance hostname is set during creation. 
inst_type = flavors.get_flavor_by_name('m1.tiny') (instances, _) = self.compute_api.create(self.context, inst_type, image_href=uuids.image_href_id, display_name='test host') self.assertEqual('test-host', instances[0]['hostname']) def _fake_rescue_block_devices(self, instance, status="in-use"): fake_bdms = block_device_obj.block_device_make_list(self.context, [fake_block_device.FakeDbBlockDeviceDict( {'device_name': '/dev/vda', 'source_type': 'volume', 'boot_index': 0, 'destination_type': 'volume', 'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'})]) volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66', 'state': 'active', 'instance_uuid': instance['uuid']} return fake_bdms, volume @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(cinder.API, 'get') def test_rescue_volume_backed_no_image(self, mock_get_vol, mock_get_bdms): # Instance started without an image params = {'image_ref': ''} volume_backed_inst_1 = self._create_fake_instance_obj(params=params) bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_1) mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"} mock_get_bdms.return_value = bdms with mock.patch.object(self.compute, '_prep_block_device'): self.compute.build_and_run_instance(self.context, volume_backed_inst_1, {}, {}, {}, block_device_mapping=[]) self.assertRaises(exception.InstanceNotRescuable, self.compute_api.rescue, self.context, volume_backed_inst_1) @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(cinder.API, 'get') def test_rescue_volume_backed_placeholder_image(self, mock_get_vol, mock_get_bdms): # Instance started with a placeholder image (for metadata) volume_backed_inst_2 = self._create_fake_instance_obj( {'image_ref': 'my_placeholder_img', 'root_device_name': '/dev/vda'}) bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_2) mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"} mock_get_bdms.return_value = bdms with mock.patch.object(self.compute, '_prep_block_device'): self.compute.build_and_run_instance(self.context, volume_backed_inst_2, {}, {}, {}, block_device_mapping=[]) self.assertRaises(exception.InstanceNotRescuable, self.compute_api.rescue, self.context, volume_backed_inst_2) def test_get(self): # Test get instance. exp_instance = self._create_fake_instance_obj() instance = self.compute_api.get(self.context, exp_instance.uuid, want_objects=True) self.assertEqual(exp_instance.id, instance.id) def test_get_with_admin_context(self): # Test get instance. c = context.get_admin_context() exp_instance = self._create_fake_instance_obj() instance = self.compute_api.get(c, exp_instance['uuid'], want_objects=True) self.assertEqual(exp_instance.id, instance.id) def test_get_all_by_name_regexp(self): # Test searching instances by name (display_name). 
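# The 'name' search option is treated as a regular expression: anchored patterns like '^woo.*' match 'woot' and 'woo' but not 'not-woot', while '.*oot.*' matches 'woot' and 'not-woot'.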
c = context.get_admin_context() instance1 = self._create_fake_instance_obj({'display_name': 'woot'}) instance2 = self._create_fake_instance_obj({ 'display_name': 'woo'}) instance3 = self._create_fake_instance_obj({ 'display_name': 'not-woot'}) instances = self.compute_api.get_all(c, search_opts={'name': '^woo.*'}) self.assertEqual(len(instances), 2) instance_uuids = [instance['uuid'] for instance in instances] self.assertIn(instance1['uuid'], instance_uuids) self.assertIn(instance2['uuid'], instance_uuids) instances = self.compute_api.get_all(c, search_opts={'name': '^woot.*'}) instance_uuids = [instance['uuid'] for instance in instances] self.assertEqual(len(instances), 1) self.assertIn(instance1['uuid'], instance_uuids) instances = self.compute_api.get_all(c, search_opts={'name': '.*oot.*'}) self.assertEqual(len(instances), 2) instance_uuids = [instance['uuid'] for instance in instances] self.assertIn(instance1['uuid'], instance_uuids) self.assertIn(instance3['uuid'], instance_uuids) instances = self.compute_api.get_all(c, search_opts={'name': '^n.*'}) self.assertEqual(len(instances), 1) instance_uuids = [instance['uuid'] for instance in instances] self.assertIn(instance3['uuid'], instance_uuids) instances = self.compute_api.get_all(c, search_opts={'name': 'noth.*'}) self.assertEqual(len(instances), 0) def test_get_all_by_multiple_options_at_once(self): # Test searching by multiple options at once. c = context.get_admin_context() def fake_network_info(ip): info = [{ 'address': 'aa:bb:cc:dd:ee:ff', 'id': 1, 'network': { 'bridge': 'br0', 'id': 1, 'label': 'private', 'subnets': [{ 'cidr': '192.168.0.0/24', 'ips': [{ 'address': ip, 'type': 'fixed', }] }] } }] return jsonutils.dumps(info) instance1 = self._create_fake_instance_obj({ 'display_name': 'woot', 'uuid': '00000000-0000-0000-0000-000000000010', 'info_cache': objects.InstanceInfoCache( network_info=fake_network_info('192.168.0.1'))}) self._create_fake_instance_obj({ # instance2 'display_name': 'woo', 'uuid': '00000000-0000-0000-0000-000000000020', 'info_cache': objects.InstanceInfoCache( network_info=fake_network_info('192.168.0.2'))}) instance3 = self._create_fake_instance_obj({ 'display_name': 'not-woot', 'uuid': '00000000-0000-0000-0000-000000000030', 'info_cache': objects.InstanceInfoCache( network_info=fake_network_info('192.168.0.3'))}) # ip ends up matching 2nd octet here.. so all 3 match ip # but 'name' only matches one instances = self.compute_api.get_all(c, search_opts={'ip': '.*\.1', 'name': 'not.*'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance3['uuid']) # ip ends up matching any ip with a '1' in the last octet.. # so instance 1 and 3.. but name should only match #1 # but 'name' only matches one instances = self.compute_api.get_all(c, search_opts={'ip': '.*\.1$', 'name': '^woo.*'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance1['uuid']) # same as above but no match on name (name matches instance1 # but the ip query doesn't instances = self.compute_api.get_all(c, search_opts={'ip': '.*\.2$', 'name': '^woot.*'}) self.assertEqual(len(instances), 0) # ip matches all 3... ipv6 matches #2+#3...name matches #3 instances = self.compute_api.get_all(c, search_opts={'ip': '.*\.1', 'name': 'not.*', 'ip6': '^.*12.*34.*'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance3['uuid']) def test_get_all_by_image(self): # Test searching instances by image. 
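# The 'image' filter matches on the instance's image_ref and also accepts a list of image ids, which is exercised at the end of this test.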
c = context.get_admin_context() instance1 = self._create_fake_instance_obj({'image_ref': '1234'}) instance2 = self._create_fake_instance_obj({'image_ref': '4567'}) instance3 = self._create_fake_instance_obj({'image_ref': '4567'}) instances = self.compute_api.get_all(c, search_opts={'image': '123'}) self.assertEqual(len(instances), 0) instances = self.compute_api.get_all(c, search_opts={'image': '1234'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance1['uuid']) instances = self.compute_api.get_all(c, search_opts={'image': '4567'}) self.assertEqual(len(instances), 2) instance_uuids = [instance['uuid'] for instance in instances] self.assertIn(instance2['uuid'], instance_uuids) self.assertIn(instance3['uuid'], instance_uuids) # Test passing a list as search arg instances = self.compute_api.get_all(c, search_opts={'image': ['1234', '4567']}) self.assertEqual(len(instances), 3) def test_get_all_by_flavor(self): # Test searching instances by flavor. c = context.get_admin_context() flavor_dict = {f.flavorid: f for f in objects.FlavorList.get_all(c)} instance1 = self._create_fake_instance_obj( {'instance_type_id': flavor_dict['1'].id}) instance2 = self._create_fake_instance_obj( {'instance_type_id': flavor_dict['2'].id}) instance3 = self._create_fake_instance_obj( {'instance_type_id': flavor_dict['2'].id}) instances = self.compute_api.get_all(c, search_opts={'flavor': 5}) self.assertEqual(len(instances), 0) # ensure an unknown flavor id maps to an exception self.assertRaises(exception.FlavorNotFound, self.compute_api.get_all, c, search_opts={'flavor': 99}) instances = self.compute_api.get_all(c, search_opts={'flavor': 1}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['id'], instance1['id']) instances = self.compute_api.get_all(c, search_opts={'flavor': 2}) self.assertEqual(len(instances), 2) instance_uuids = [instance['uuid'] for instance in instances] self.assertIn(instance2['uuid'], instance_uuids) self.assertIn(instance3['uuid'], instance_uuids) def test_get_all_by_state(self): # Test searching instances by state. c = context.get_admin_context() instance1 = self._create_fake_instance_obj({ 'power_state': power_state.SHUTDOWN, }) instance2 = self._create_fake_instance_obj({ 'power_state': power_state.RUNNING, }) instance3 = self._create_fake_instance_obj({ 'power_state': power_state.RUNNING, }) instances = self.compute_api.get_all(c, search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) instances = self.compute_api.get_all(c, search_opts={'power_state': power_state.SHUTDOWN}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance1['uuid']) instances = self.compute_api.get_all(c, search_opts={'power_state': power_state.RUNNING}) self.assertEqual(len(instances), 2) instance_uuids = [instance['uuid'] for instance in instances] self.assertIn(instance2['uuid'], instance_uuids) self.assertIn(instance3['uuid'], instance_uuids) # Test passing a list as search arg instances = self.compute_api.get_all(c, search_opts={'power_state': [power_state.SHUTDOWN, power_state.RUNNING]}) self.assertEqual(len(instances), 3) def test_get_all_by_metadata(self): # Test searching instances by metadata.
c = context.get_admin_context() self._create_fake_instance_obj() # instance0 self._create_fake_instance_obj({ # instance1 'metadata': {'key1': 'value1'}}) instance2 = self._create_fake_instance_obj({ 'metadata': {'key2': 'value2'}}) instance3 = self._create_fake_instance_obj({ 'metadata': {'key3': 'value3'}}) instance4 = self._create_fake_instance_obj({ 'metadata': {'key3': 'value3', 'key4': 'value4'}}) # get all instances instances = self.compute_api.get_all(c, search_opts={'metadata': u"{}"}) self.assertEqual(len(instances), 5) # wrong key/value combination instances = self.compute_api.get_all(c, search_opts={'metadata': u'{"key1": "value3"}'}) self.assertEqual(len(instances), 0) # non-existing keys instances = self.compute_api.get_all(c, search_opts={'metadata': u'{"key5": "value1"}'}) self.assertEqual(len(instances), 0) # find existing instance instances = self.compute_api.get_all(c, search_opts={'metadata': u'{"key2": "value2"}'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance2['uuid']) instances = self.compute_api.get_all(c, search_opts={'metadata': u'{"key3": "value3"}'}) self.assertEqual(len(instances), 2) instance_uuids = [instance['uuid'] for instance in instances] self.assertIn(instance3['uuid'], instance_uuids) self.assertIn(instance4['uuid'], instance_uuids) # multiple criteria as a dict instances = self.compute_api.get_all(c, search_opts={'metadata': u'{"key3": "value3","key4": "value4"}'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance4['uuid']) # multiple criteria as a list instances = self.compute_api.get_all(c, search_opts= {'metadata': u'[{"key4": "value4"},{"key3": "value3"}]'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance4['uuid']) def test_get_all_by_system_metadata(self): # Test searching instances by system metadata. 
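# Like the metadata filter above, the system_metadata filter takes a JSON-encoded dict of key/value pairs to match against.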
c = context.get_admin_context() instance1 = self._create_fake_instance_obj({ 'system_metadata': {'key1': 'value1'}}) # find existing instance instances = self.compute_api.get_all(c, search_opts={'system_metadata': u'{"key1": "value1"}'}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance1['uuid']) def test_all_instance_metadata(self): self._create_fake_instance_obj({'metadata': {'key1': 'value1'}, 'user_id': 'user1', 'project_id': 'project1'}) self._create_fake_instance_obj({'metadata': {'key2': 'value2'}, 'user_id': 'user2', 'project_id': 'project2'}) _context = self.context _context.user_id = 'user1' _context.project_id = 'project1' metadata = self.compute_api.get_all_instance_metadata(_context, search_filts=[]) self.assertEqual(1, len(metadata)) self.assertEqual(metadata[0]['key'], 'key1') _context.user_id = 'user2' _context.project_id = 'project2' metadata = self.compute_api.get_all_instance_metadata(_context, search_filts=[]) self.assertEqual(1, len(metadata)) self.assertEqual(metadata[0]['key'], 'key2') _context = context.get_admin_context() metadata = self.compute_api.get_all_instance_metadata(_context, search_filts=[]) self.assertEqual(2, len(metadata)) def test_instance_metadata(self): meta_changes = [None] self.flags(notify_on_state_change='vm_state') def fake_change_instance_metadata(inst, ctxt, diff, instance=None, instance_uuid=None): meta_changes[0] = diff self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata', fake_change_instance_metadata) _context = context.get_admin_context() instance = self._create_fake_instance_obj({'metadata': {'key1': 'value1'}}) metadata = self.compute_api.get_instance_metadata(_context, instance) self.assertEqual(metadata, {'key1': 'value1'}) self.compute_api.update_instance_metadata(_context, instance, {'key2': 'value2'}) metadata = self.compute_api.get_instance_metadata(_context, instance) self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'}) self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}]) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] payload = msg.payload self.assertIn('metadata', payload) self.assertEqual(payload['metadata'], metadata) new_metadata = {'key2': 'bah', 'key3': 'value3'} self.compute_api.update_instance_metadata(_context, instance, new_metadata, delete=True) metadata = self.compute_api.get_instance_metadata(_context, instance) self.assertEqual(metadata, new_metadata) self.assertEqual(meta_changes, [{ 'key1': ['-'], 'key2': ['+', 'bah'], 'key3': ['+', 'value3'], }]) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[1] payload = msg.payload self.assertIn('metadata', payload) self.assertEqual(payload['metadata'], metadata) self.compute_api.delete_instance_metadata(_context, instance, 'key2') metadata = self.compute_api.get_instance_metadata(_context, instance) self.assertEqual(metadata, {'key3': 'value3'}) self.assertEqual(meta_changes, [{'key2': ['-']}]) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3) msg = fake_notifier.NOTIFICATIONS[2] payload = msg.payload self.assertIn('metadata', payload) self.assertEqual(payload['metadata'], {'key3': 'value3'}) def test_disallow_metadata_changes_during_building(self): def fake_change_instance_metadata(inst, ctxt, diff, instance=None, instance_uuid=None): pass self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata', fake_change_instance_metadata) instance = self._create_fake_instance_obj( {'vm_state': vm_states.BUILDING}) 
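# Metadata changes are rejected while the instance is still BUILDING; both the delete and the update below should raise InstanceInvalidState.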
self.assertRaises(exception.InstanceInvalidState, self.compute_api.delete_instance_metadata, self.context, instance, "key") self.assertRaises(exception.InstanceInvalidState, self.compute_api.update_instance_metadata, self.context, instance, "key") @staticmethod def _parse_db_block_device_mapping(bdm_ref): attr_list = ('delete_on_termination', 'device_name', 'no_device', 'virtual_name', 'volume_id', 'volume_size', 'snapshot_id') bdm = {} for attr in attr_list: val = bdm_ref.get(attr, None) if val: bdm[attr] = val return bdm def test_create_block_device_mapping(self): def _compare_bdm_object(obj1, obj2, extra_keys=()): for key in (('device_name', 'source_type', 'destination_type') + extra_keys): self.assertEqual(getattr(obj1, key), getattr(obj2, key)) swap_size = ephemeral_size = 1 instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size} instance = self._create_fake_instance_obj() mappings = [ {'virtual': 'ami', 'device': 'sda1'}, {'virtual': 'root', 'device': '/dev/sda1'}, {'virtual': 'swap', 'device': 'sdb4'}, {'virtual': 'swap', 'device': 'sdb3'}, {'virtual': 'swap', 'device': 'sdb2'}, {'virtual': 'swap', 'device': 'sdb1'}, {'virtual': 'ephemeral0', 'device': 'sdc1'}, {'virtual': 'ephemeral1', 'device': 'sdc2'}, {'virtual': 'ephemeral2', 'device': 'sdc3'}] block_device_mapping = [ # root {'device_name': '/dev/sda1', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000', 'delete_on_termination': False}, # overwrite swap {'device_name': '/dev/sdb2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111', 'delete_on_termination': False}, {'device_name': '/dev/sdb3', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'}, {'device_name': '/dev/sdb4', 'no_device': True}, # overwrite ephemeral {'device_name': '/dev/sdc1', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}, {'device_name': '/dev/sdc2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444', 'delete_on_termination': False}, {'device_name': '/dev/sdc3', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'}, {'device_name': '/dev/sdc4', 'no_device': True}, # volume {'device_name': '/dev/sdd1', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666', 'delete_on_termination': False}, {'device_name': '/dev/sdd2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'}, {'device_name': '/dev/sdd3', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'}, {'device_name': '/dev/sdd4', 'no_device': True}] image_mapping = self.compute_api._prepare_image_mapping( instance_type, mappings) image_mapping = block_device_obj.block_device_make_list_from_dicts( self.context, image_mapping) self.compute_api._create_block_device_mapping( instance_type, instance['uuid'], image_mapping) bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance['uuid']) expected_result = [ {'source_type': 'blank', 'destination_type': 'local', 'guest_format': 'swap', 'device_name': '/dev/sdb1', 'volume_size': swap_size, 'delete_on_termination': True}, {'source_type': 'blank', 
'destination_type': 'local', 'guest_format': CONF.default_ephemeral_format, 'device_name': '/dev/sdc3', 'delete_on_termination': True}, {'source_type': 'blank', 'destination_type': 'local', 'guest_format': CONF.default_ephemeral_format, 'device_name': '/dev/sdc1', 'delete_on_termination': True}, {'source_type': 'blank', 'destination_type': 'local', 'guest_format': CONF.default_ephemeral_format, 'device_name': '/dev/sdc2', 'delete_on_termination': True}, ] expected_result = block_device_obj.block_device_make_list_from_dicts( self.context, map(fake_block_device.AnonFakeDbBlockDeviceDict, expected_result)) bdms.sort(key=operator.attrgetter('device_name')) expected_result.sort(key=operator.attrgetter('device_name')) self.assertEqual(len(bdms), len(expected_result)) for expected, got in zip(expected_result, bdms): _compare_bdm_object( expected, got, extra_keys=('guest_format', 'delete_on_termination')) block_device_mapping = ( block_device_obj.block_device_make_list_from_dicts( self.context, map(fake_block_device.AnonFakeDbBlockDeviceDict, block_device_mapping))) self.compute_api._create_block_device_mapping( flavors.get_default_flavor(), instance['uuid'], block_device_mapping) bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance['uuid']) expected_result = [ {'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000', 'device_name': '/dev/sda1', 'source_type': 'snapshot', 'destination_type': 'volume'}, {'source_type': 'blank', 'destination_type': 'local', 'guest_format': 'swap', 'device_name': '/dev/sdb1', 'volume_size': swap_size, 'delete_on_termination': True}, {'device_name': '/dev/sdb2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111', 'delete_on_termination': False}, {'device_name': '/dev/sdb3', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'}, {'device_name': '/dev/sdb4', 'no_device': True}, {'device_name': '/dev/sdc1', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}, {'device_name': '/dev/sdc2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444', 'delete_on_termination': False}, {'device_name': '/dev/sdc3', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'}, {'no_device': True, 'device_name': '/dev/sdc4'}, {'device_name': '/dev/sdd1', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666', 'delete_on_termination': False}, {'device_name': '/dev/sdd2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'}, {'device_name': '/dev/sdd3', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'}, {'no_device': True, 'device_name': '/dev/sdd4'}] expected_result = block_device_obj.block_device_make_list_from_dicts( self.context, map(fake_block_device.AnonFakeDbBlockDeviceDict, expected_result)) bdms.sort(key=operator.itemgetter('device_name')) expected_result.sort(key=operator.itemgetter('device_name')) self.assertEqual(len(bdms), len(expected_result)) for expected, got in zip(expected_result, bdms): _compare_bdm_object( expected, got, extra_keys=('snapshot_id', 'delete_on_termination')) def _test_check_and_transform_bdm(self, bdms, expected_bdms, 
image_bdms=None, base_options=None, legacy_bdms=False, legacy_image_bdms=False): image_bdms = image_bdms or [] image_meta = {} if image_bdms: image_meta = {'properties': {'block_device_mapping': image_bdms}} if not legacy_image_bdms: image_meta['properties']['bdm_v2'] = True base_options = base_options or {'root_device_name': 'vda', 'image_ref': FAKE_IMAGE_REF} transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, image_meta, 1, 1, bdms, legacy_bdms) for expected, got in zip(expected_bdms, transformed_bdm): self.assertEqual(dict(expected.items()), dict(got.items())) def test_check_and_transform_legacy_bdm_no_image_bdms(self): legacy_bdms = [ {'device_name': '/dev/vda', 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}] expected_bdms = [block_device.BlockDeviceDict.from_legacy( legacy_bdms[0])] expected_bdms[0]['boot_index'] = 0 expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(legacy_bdms, expected_bdms, legacy_bdms=True) def test_check_and_transform_legacy_bdm_legacy_image_bdms(self): image_bdms = [ {'device_name': '/dev/vda', 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}] legacy_bdms = [ {'device_name': '/dev/vdb', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'delete_on_termination': False}] expected_bdms = [ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]), block_device.BlockDeviceDict.from_legacy(image_bdms[0])] expected_bdms[0]['boot_index'] = -1 expected_bdms[1]['boot_index'] = 0 expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(legacy_bdms, expected_bdms, image_bdms=image_bdms, legacy_bdms=True, legacy_image_bdms=True) def test_check_and_transform_legacy_bdm_image_bdms(self): legacy_bdms = [ {'device_name': '/dev/vdb', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'delete_on_termination': False}] image_bdms = [block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'boot_index': 0})] expected_bdms = [ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]), image_bdms[0]] expected_bdms[0]['boot_index'] = -1 expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(legacy_bdms, expected_bdms, image_bdms=image_bdms, legacy_bdms=True) def test_check_and_transform_bdm_no_image_bdms(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0})] expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms) self._test_check_and_transform_bdm(bdms, expected_bdms) def test_check_and_transform_bdm_image_bdms(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0})] image_bdms = [block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444'})] expected_bdms = bdms + image_bdms expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms) def test_check_and_transform_bdm_image_bdms_w_overrides(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 
'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0}), block_device.BlockDeviceDict({'device_name': 'vdb', 'no_device': True})] image_bdms = [block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'device_name': '/dev/vdb'})] expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms) def test_check_and_transform_bdm_image_bdms_w_overrides_complex(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0}), block_device.BlockDeviceDict({'device_name': 'vdb', 'no_device': True}), block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '11111111-aaaa-bbbb-cccc-222222222222', 'device_name': 'vdc'})] image_bdms = [ block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444', 'device_name': '/dev/vdb'}), block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '55555555-aaaa-bbbb-cccc-666666666666', 'device_name': '/dev/vdc'}), block_device.BlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': '77777777-aaaa-bbbb-cccc-8888888888888', 'device_name': '/dev/vdd'})] expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, bdms + [image_bdms[2]]) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms) def test_check_and_transform_bdm_legacy_image_bdms(self): bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': FAKE_IMAGE_REF, 'boot_index': 0})] image_bdms = [{'device_name': '/dev/vda', 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}] expected_bdms = [block_device.BlockDeviceDict.from_legacy( image_bdms[0])] expected_bdms[0]['boot_index'] = 0 expected_bdms = block_device_obj.block_device_make_list_from_dicts( self.context, expected_bdms) self._test_check_and_transform_bdm(bdms, expected_bdms, image_bdms=image_bdms, legacy_image_bdms=True) def test_check_and_transform_image(self): base_options = {'root_device_name': 'vdb', 'image_ref': FAKE_IMAGE_REF} fake_legacy_bdms = [ {'device_name': '/dev/vda', 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333', 'delete_on_termination': False}] image_meta = {'properties': {'block_device_mapping': [ {'device_name': '/dev/vda', 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333', 'boot_index': 0}]}} # We get an image BDM transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, {}, 1, 1, fake_legacy_bdms, True) self.assertEqual(len(transformed_bdm), 2) # No image BDM created if image already defines a root BDM base_options['root_device_name'] = 'vda' base_options['image_ref'] = None transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, image_meta, 1, 1, [], True) self.assertEqual(len(transformed_bdm), 1) # No image BDM created transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, {}, 1, 1, fake_legacy_bdms, True) self.assertEqual(len(transformed_bdm), 1) # Volumes with multiple instances fails self.assertRaises(exception.InvalidRequest, self.compute_api._check_and_transform_bdm, self.context, base_options, {}, {}, 1, 2, fake_legacy_bdms, 
True) # Volume backed so no image_ref in base_options # v2 bdms contains a root image to volume mapping # image_meta contains a snapshot as the image # is created by nova image-create from a volume backed server # see bug 1381598 fake_v2_bdms = [{'boot_index': 0, 'connection_info': None, 'delete_on_termination': None, 'destination_type': u'volume', 'image_id': FAKE_IMAGE_REF, 'source_type': u'image', 'volume_id': None, 'volume_size': 1}] base_options['image_ref'] = None transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, image_meta, 1, 1, fake_v2_bdms, False) self.assertEqual(len(transformed_bdm), 1) # Image BDM overrides mappings base_options['image_ref'] = FAKE_IMAGE_REF image_meta = { 'properties': { 'mappings': [ {'virtual': 'ephemeral0', 'device': 'vdb'}], 'bdm_v2': True, 'block_device_mapping': [ {'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'volume', 'volume_size': 1}]}} transformed_bdm = self.compute_api._check_and_transform_bdm( self.context, base_options, {}, image_meta, 1, 1, [], False) self.assertEqual(1, len(transformed_bdm)) self.assertEqual('volume', transformed_bdm[0]['destination_type']) self.assertEqual('/dev/vdb', transformed_bdm[0]['device_name']) def test_volume_size(self): ephemeral_size = 2 swap_size = 3 volume_size = 5 swap_bdm = {'source_type': 'blank', 'guest_format': 'swap', 'destination_type': 'local'} ephemeral_bdm = {'source_type': 'blank', 'guest_format': None, 'destination_type': 'local'} volume_bdm = {'source_type': 'volume', 'volume_size': volume_size, 'destination_type': 'volume'} blank_bdm = {'source_type': 'blank', 'destination_type': 'volume'} inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size} self.assertEqual( self.compute_api._volume_size(inst_type, ephemeral_bdm), ephemeral_size) ephemeral_bdm['volume_size'] = 42 self.assertEqual( self.compute_api._volume_size(inst_type, ephemeral_bdm), 42) self.assertEqual( self.compute_api._volume_size(inst_type, swap_bdm), swap_size) swap_bdm['volume_size'] = 42 self.assertEqual( self.compute_api._volume_size(inst_type, swap_bdm), 42) self.assertEqual( self.compute_api._volume_size(inst_type, volume_bdm), volume_size) self.assertIsNone( self.compute_api._volume_size(inst_type, blank_bdm)) def test_is_volume_backed_instance_no_bdm_no_image(self): ctxt = self.context instance = self._create_fake_instance_obj({'image_ref': ''}) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, None)) def test_is_volume_backed_instance_empty_bdm_with_image(self): ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': FAKE_IMAGE_REF }) self.assertFalse( self.compute_api.is_volume_backed_instance( ctxt, instance, block_device_obj.block_device_make_list(ctxt, []))) def test_is_volume_backed_instance_bdm_volume_no_image(self): ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': '' }) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'volume_id': uuids.volume_id, 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'boot_index': 0, 'destination_type': 'volume'})]) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) def test_is_volume_backed_instance_bdm_local_no_image(self): # if the root device is local the instance is not volume backed, even # if no image_ref is set. 
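# Two BDMs are set up below: the boot device (/dev/vda) has destination_type 'local' and a secondary volume is attached at /dev/vdb, so is_volume_backed_instance should return False.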
ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': '' }) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'volume_id': uuids.volume_id, 'destination_type': 'local', 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'boot_index': 0, 'snapshot_id': None}), fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vdb', 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'boot_index': 1, 'destination_type': 'volume', 'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6', 'snapshot_id': None})]) self.assertFalse( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) def test_is_volume_backed_instance_bdm_volume_with_image(self): ctxt = self.context instance = self._create_fake_instance_obj({ 'root_device_name': 'vda', 'image_ref': FAKE_IMAGE_REF }) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'volume_id': uuids.volume_id, 'boot_index': 0, 'destination_type': 'volume'})]) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) def test_is_volume_backed_instance_bdm_snapshot(self): ctxt = self.context instance = self._create_fake_instance_obj({'root_device_name': 'vda'}) bdms = block_device_obj.block_device_make_list(ctxt, [fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'device_name': '/dev/vda', 'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6', 'instance_uuid': 'f8000000-0000-0000-0000-000000000000', 'destination_type': 'volume', 'boot_index': 0, 'volume_id': None})]) self.assertTrue( self.compute_api.is_volume_backed_instance(ctxt, instance, bdms)) @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def test_is_volume_backed_instance_empty_bdm_by_uuid(self, mock_bdms): ctxt = self.context instance = self._create_fake_instance_obj() mock_bdms.return_value = \ block_device_obj.block_device_make_list(ctxt, []) self.assertFalse( self.compute_api.is_volume_backed_instance(ctxt, instance, None)) mock_bdms.assert_called_with(ctxt, instance.uuid) def test_reservation_id_one_instance(self): """Verify building an instance has a reservation_id that matches return value from create. """ (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id) self.assertEqual(len(refs), 1) self.assertEqual(refs[0]['reservation_id'], resv_id) def test_reservation_ids_two_instances(self): """Verify building 2 instances at once results in a reservation_id being returned equal to reservation id set in both instances. 
""" (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2) self.assertEqual(len(refs), 2) self.assertIsNotNone(resv_id) for instance in refs: self.assertEqual(instance['reservation_id'], resv_id) def test_multi_instance_display_name_template(self): self.flags(multi_instance_display_name_template='%(name)s') (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2, display_name='x') self.assertEqual(refs[0]['display_name'], 'x') self.assertEqual(refs[0]['hostname'], 'x') self.assertEqual(refs[1]['display_name'], 'x') self.assertEqual(refs[1]['hostname'], 'x') self.flags(multi_instance_display_name_template='%(name)s-%(count)d') self._multi_instance_display_name_default() self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s') (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2, display_name='x') self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid']) self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid']) self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid']) self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid']) def test_multi_instance_display_name_default(self): self._multi_instance_display_name_default() def _multi_instance_display_name_default(self): (refs, resv_id) = self.compute_api.create(self.context, flavors.get_default_flavor(), image_href=uuids.image_href_id, min_count=2, max_count=2, display_name='x') self.assertEqual(refs[0]['display_name'], 'x-1') self.assertEqual(refs[0]['hostname'], 'x-1') self.assertEqual(refs[1]['display_name'], 'x-2') self.assertEqual(refs[1]['hostname'], 'x-2') def test_instance_architecture(self): # Test the instance architecture. i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['architecture'], arch.X86_64) def test_instance_unknown_architecture(self): # Test if the architecture is unknown. instance = self._create_fake_instance_obj( params={'architecture': ''}) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertNotEqual(instance['architecture'], 'Unknown') def test_instance_name_template(self): # Test the instance_name template. self.flags(instance_name_template='instance-%d') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id']) self.flags(instance_name_template='instance-%(uuid)s') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid']) self.flags(instance_name_template='%(id)d-%(uuid)s') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], '%d-%s' % (i_ref['id'], i_ref['uuid'])) # not allowed.. 
default is uuid self.flags(instance_name_template='%(name)s') i_ref = self._create_fake_instance_obj() self.assertEqual(i_ref['name'], i_ref['uuid']) def test_add_remove_fixed_ip(self): instance = self._create_fake_instance_obj(params={'host': CONF.host}) self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance', lambda *a, **kw: None) self.compute_api.add_fixed_ip(self.context, instance, '1') self.compute_api.remove_fixed_ip(self.context, instance, '192.168.1.1') self.compute_api.delete(self.context, instance) def test_attach_volume_invalid(self): instance = fake_instance.fake_instance_obj(None, **{ 'locked': False, 'vm_state': vm_states.ACTIVE, 'task_state': None, 'launched_at': timeutils.utcnow()}) self.assertRaises(exception.InvalidDevicePath, self.compute_api.attach_volume, self.context, instance, None, '/invalid') def test_check_dev_name_assign_dev_name(self): instance = self._create_fake_instance_obj() bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'instance_uuid': instance.uuid, 'volume_id': 'vol-id', 'source_type': 'volume', 'destination_type': 'volume', 'device_name': None, 'boot_index': None, 'disk_bus': None, 'device_type': None }))] self.compute._check_dev_name(bdms, instance) self.assertIsNotNone(bdms[0].device_name) @mock.patch.object(compute_manager.ComputeManager, '_get_device_name_for_instance') def test_check_dev_name_skip_bdms_with_dev_name(self, mock_get_dev_name): instance = self._create_fake_instance_obj() bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'instance_uuid': instance.uuid, 'volume_id': 'vol-id', 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vda', 'boot_index': None, 'disk_bus': None, 'device_type': None }))] self.compute._check_dev_name(bdms, instance) self.assertFalse(mock_get_dev_name.called) def test_no_attach_volume_in_rescue_state(self): def fake(*args, **kwargs): pass def fake_volume_get(self, context, volume_id): return {'id': volume_id} self.stubs.Set(cinder.API, 'get', fake_volume_get) self.stubs.Set(cinder.API, 'check_attach', fake) self.stubs.Set(cinder.API, 'reserve_volume', fake) instance = fake_instance.fake_instance_obj(None, **{ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'locked': False, 'vm_state': vm_states.RESCUED}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_volume, self.context, instance, None, '/dev/vdb') def test_no_attach_volume_in_suspended_state(self): instance = fake_instance.fake_instance_obj(None, **{ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'locked': False, 'vm_state': vm_states.SUSPENDED}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_volume, self.context, instance, {'id': 'fake-volume-id'}, '/dev/vdb') def test_no_detach_volume_in_rescue_state(self): # Ensure volume can be detached from instance params = {'vm_state': vm_states.RESCUED} instance = self._create_fake_instance_obj(params=params) volume = {'id': 1, 'attach_status': 'attached', 'instance_uuid': instance['uuid']} self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_volume, self.context, instance, volume) @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(cinder.API, 'get') def test_no_rescue_in_volume_state_attaching(self, mock_get_vol, mock_get_bdms): # Make sure a VM cannot be rescued while volume is being attached instance = self._create_fake_instance_obj() bdms, volume = self._fake_rescue_block_devices(instance) 
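        # The cinder 'get' call below is mocked to report the volume as still
        # 'attaching'; rescue is expected to be refused with InvalidVolume
        # while the attach is still in progress.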
        mock_get_vol.return_value = {'id': volume['id'],
                                     'status': "attaching"}
        mock_get_bdms.return_value = bdms

        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.rescue, self.context, instance)

    def test_vnc_console(self):
        # Make sure we can get a vnc console for an instance.
        fake_instance = self._fake_instance(
            {'uuid': 'f3000000-0000-0000-0000-000000000000',
             'host': 'fake_compute_host'})
        fake_console_type = "novnc"
        fake_connect_info = {'token': 'fake_token',
                             'console_type': fake_console_type,
                             'host': 'fake_console_host',
                             'port': 'fake_console_port',
                             'internal_access_path': 'fake_access_path',
                             'instance_uuid': fake_instance.uuid,
                             'access_url': 'fake_console_url'}

        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_vnc_console')
        rpcapi.get_vnc_console(
            self.context, instance=fake_instance,
            console_type=fake_console_type).AndReturn(fake_connect_info)

        self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                                 'authorize_console')
        self.compute_api.consoleauth_rpcapi.authorize_console(
            self.context, 'fake_token', fake_console_type,
            'fake_console_host', 'fake_console_port', 'fake_access_path',
            'f3000000-0000-0000-0000-000000000000',
            access_url='fake_console_url')

        self.mox.ReplayAll()

        console = self.compute_api.get_vnc_console(self.context,
                                                   fake_instance,
                                                   fake_console_type)
        self.assertEqual(console, {'url': 'fake_console_url'})

    def test_get_vnc_console_no_host(self):
        instance = self._create_fake_instance_obj(params={'host': ''})
        self.assertRaises(exception.InstanceNotReady,
                          self.compute_api.get_vnc_console,
                          self.context, instance, 'novnc')

    def test_spice_console(self):
        # Make sure we can get a spice console for an instance.
        fake_instance = self._fake_instance(
            {'uuid': 'f3000000-0000-0000-0000-000000000000',
             'host': 'fake_compute_host'})
        fake_console_type = "spice-html5"
        fake_connect_info = {'token': 'fake_token',
                             'console_type': fake_console_type,
                             'host': 'fake_console_host',
                             'port': 'fake_console_port',
                             'internal_access_path': 'fake_access_path',
                             'instance_uuid': fake_instance.uuid,
                             'access_url': 'fake_console_url'}

        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_spice_console')
        rpcapi.get_spice_console(
            self.context, instance=fake_instance,
            console_type=fake_console_type).AndReturn(fake_connect_info)

        self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                                 'authorize_console')
        self.compute_api.consoleauth_rpcapi.authorize_console(
            self.context, 'fake_token', fake_console_type,
            'fake_console_host', 'fake_console_port', 'fake_access_path',
            'f3000000-0000-0000-0000-000000000000',
            access_url='fake_console_url')

        self.mox.ReplayAll()

        console = self.compute_api.get_spice_console(self.context,
                                                     fake_instance,
                                                     fake_console_type)
        self.assertEqual(console, {'url': 'fake_console_url'})

    def test_get_spice_console_no_host(self):
        instance = self._create_fake_instance_obj(params={'host': ''})
        self.assertRaises(exception.InstanceNotReady,
                          self.compute_api.get_spice_console,
                          self.context, instance, 'spice')

    def test_rdp_console(self):
        # Make sure we can get a rdp console for an instance.
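        # NOTE: the rdp, serial and mks console tests below follow the same
        # pattern as the vnc and spice tests above: the compute RPC call that
        # fetches the connect_info is stubbed or mocked out, consoleauth's
        # authorize_console is set up as a collaborator, and the compute API
        # is expected to return only {'url': <access_url>} to the caller.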
fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = "rdp-html5" fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_console_host', 'port': 'fake_console_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_console_url'} rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_rdp_console') rpcapi.get_rdp_console( self.context, instance=fake_instance, console_type=fake_console_type).AndReturn(fake_connect_info) self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi, 'authorize_console') self.compute_api.consoleauth_rpcapi.authorize_console( self.context, 'fake_token', fake_console_type, 'fake_console_host', 'fake_console_port', 'fake_access_path', 'f3000000-0000-0000-0000-000000000000', access_url='fake_console_url') self.mox.ReplayAll() console = self.compute_api.get_rdp_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_console_url'}) def test_get_rdp_console_no_host(self): instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_rdp_console, self.context, instance, 'rdp') def test_serial_console(self): # Make sure we can get a serial proxy url for an instance. fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = 'serial' fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_serial_host', 'port': 'fake_tcp_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_access_url'} rpcapi = compute_rpcapi.ComputeAPI with test.nested( mock.patch.object(rpcapi, 'get_serial_console', return_value=fake_connect_info), mock.patch.object(self.compute_api.consoleauth_rpcapi, 'authorize_console') ) as (mock_get_serial_console, mock_authorize_console): self.compute_api.consoleauth_rpcapi.authorize_console( self.context, 'fake_token', fake_console_type, 'fake_serial_host', 'fake_tcp_port', 'fake_access_path', 'f3000000-0000-0000-0000-000000000000', access_url='fake_access_url') console = self.compute_api.get_serial_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_access_url'}) def test_get_serial_console_no_host(self): # Make sure an exception is raised when instance is not Active. 
instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_serial_console, self.context, instance, 'serial') def test_mks_console(self): fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_console_type = 'webmks' fake_connect_info = {'token': 'fake_token', 'console_type': fake_console_type, 'host': 'fake_mks_host', 'port': 'fake_tcp_port', 'internal_access_path': 'fake_access_path', 'instance_uuid': fake_instance.uuid, 'access_url': 'fake_access_url'} with test.nested( mock.patch.object(self.compute_api.compute_rpcapi, 'get_mks_console', return_value=fake_connect_info), mock.patch.object(self.compute_api.consoleauth_rpcapi, 'authorize_console') ) as (mock_get_mks_console, mock_authorize_console): console = self.compute_api.get_mks_console(self.context, fake_instance, fake_console_type) self.assertEqual(console, {'url': 'fake_access_url'}) def test_get_mks_console_no_host(self): # Make sure an exception is raised when instance is not Active. instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_mks_console, self.context, instance, 'mks') def test_console_output(self): fake_instance = self._fake_instance({ 'uuid': 'f3000000-0000-0000-0000-000000000000', 'host': 'fake_compute_host'}) fake_tail_length = 699 fake_console_output = 'fake console output' rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_console_output') rpcapi.get_console_output( self.context, instance=fake_instance, tail_length=fake_tail_length).AndReturn(fake_console_output) self.mox.ReplayAll() output = self.compute_api.get_console_output(self.context, fake_instance, tail_length=fake_tail_length) self.assertEqual(output, fake_console_output) def test_console_output_no_host(self): instance = self._create_fake_instance_obj(params={'host': ''}) self.assertRaises(exception.InstanceNotReady, self.compute_api.get_console_output, self.context, instance) def test_attach_interface(self): new_type = flavors.get_flavor_by_flavor_id('4') instance = objects.Instance(image_ref=uuids.image_instance, system_metadata={}, flavor=new_type, host='fake-host') self.mox.StubOutWithMock(self.compute.network_api, 'allocate_port_for_instance') nwinfo = [fake_network_cache_model.new_vif()] network_id = nwinfo[0]['network']['id'] port_id = nwinfo[0]['id'] req_ip = '1.2.3.4' self.compute.network_api.allocate_port_for_instance( self.context, instance, port_id, network_id, req_ip, bind_host_id='fake-host' ).AndReturn(nwinfo) self.mox.ReplayAll() vif = self.compute.attach_interface(self.context, instance, network_id, port_id, req_ip) self.assertEqual(vif['id'], network_id) return nwinfo, port_id def test_attach_interface_failed(self): new_type = flavors.get_flavor_by_flavor_id('4') instance = objects.Instance( id=42, uuid=uuids.interface_failed_instance, image_ref='foo', system_metadata={}, flavor=new_type, host='fake-host') nwinfo = [fake_network_cache_model.new_vif()] network_id = nwinfo[0]['network']['id'] port_id = nwinfo[0]['id'] req_ip = '1.2.3.4' with test.nested( mock.patch.object(self.compute.driver, 'attach_interface'), mock.patch.object(self.compute.network_api, 'allocate_port_for_instance'), mock.patch.object(self.compute.network_api, 'deallocate_port_for_instance')) as ( mock_attach, mock_allocate, mock_deallocate): mock_allocate.return_value = nwinfo mock_attach.side_effect = 
exception.NovaException("attach_failed") self.assertRaises(exception.InterfaceAttachFailed, self.compute.attach_interface, self.context, instance, network_id, port_id, req_ip) mock_allocate.assert_called_once_with(self.context, instance, network_id, port_id, req_ip, bind_host_id='fake-host') mock_deallocate.assert_called_once_with(self.context, instance, port_id) def test_detach_interface(self): nwinfo, port_id = self.test_attach_interface() self.stubs.Set(self.compute.network_api, 'deallocate_port_for_instance', lambda a, b, c: []) instance = objects.Instance() instance.info_cache = objects.InstanceInfoCache.new( self.context, uuids.info_cache_instance) instance.info_cache.network_info = network_model.NetworkInfo.hydrate( nwinfo) self.compute.detach_interface(self.context, instance, port_id) self.assertEqual(self.compute.driver._interfaces, {}) def test_detach_interface_failed(self): nwinfo, port_id = self.test_attach_interface() instance = objects.Instance(id=42) instance['uuid'] = uuids.info_cache_instance instance.info_cache = objects.InstanceInfoCache.new( self.context, uuids.info_cache_instance) instance.info_cache.network_info = network_model.NetworkInfo.hydrate( nwinfo) with test.nested( mock.patch.object(self.compute.driver, 'detach_interface', side_effect=exception.NovaException('detach_failed')), mock.patch.object(self.compute.network_api, 'deallocate_port_for_instance')) as ( mock_detach, mock_deallocate): self.assertRaises(exception.InterfaceDetachFailed, self.compute.detach_interface, self.context, instance, port_id) self.assertFalse(mock_deallocate.called) @mock.patch.object(compute_manager.LOG, 'warning') def test_detach_interface_deallocate_port_for_instance_failed(self, warn_mock): # Tests that when deallocate_port_for_instance fails we log the failure # before exiting compute.detach_interface. nwinfo, port_id = self.test_attach_interface() instance = objects.Instance(id=42, uuid=uuidutils.generate_uuid()) instance.info_cache = objects.InstanceInfoCache.new( self.context, uuids.info_cache_instance) instance.info_cache.network_info = network_model.NetworkInfo.hydrate( nwinfo) # Sometimes neutron errors slip through the neutronv2 API so we want # to make sure we catch those in the compute manager and not just # NovaExceptions. 
error = neutron_exceptions.PortNotFoundClient() with test.nested( mock.patch.object(self.compute.driver, 'detach_interface'), mock.patch.object(self.compute.network_api, 'deallocate_port_for_instance', side_effect=error), mock.patch.object(self.compute, '_instance_update')) as ( mock_detach, mock_deallocate, mock_instance_update): ex = self.assertRaises(neutron_exceptions.PortNotFoundClient, self.compute.detach_interface, self.context, instance, port_id) self.assertEqual(error, ex) mock_deallocate.assert_called_once_with( self.context, instance, port_id) self.assertEqual(1, warn_mock.call_count) def test_attach_volume(self): fake_bdm = fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb'}) bdm = block_device_obj.BlockDeviceMapping()._from_db_object( self.context, block_device_obj.BlockDeviceMapping(), fake_bdm) instance = self._create_fake_instance_obj() instance.id = 42 fake_volume = {'id': 'fake-volume-id'} with test.nested( mock.patch.object(cinder.API, 'get', return_value=fake_volume), mock.patch.object(cinder.API, 'check_attach'), mock.patch.object(cinder.API, 'reserve_volume'), mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name', return_value=bdm), mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume') ) as (mock_get, mock_check_attach, mock_reserve_vol, mock_reserve_bdm, mock_attach): self.compute_api.attach_volume( self.context, instance, 'fake-volume-id', '/dev/vdb', 'ide', 'cdrom') mock_reserve_bdm.assert_called_once_with( self.context, instance, '/dev/vdb', 'fake-volume-id', disk_bus='ide', device_type='cdrom') self.assertEqual(mock_get.call_args, mock.call(self.context, 'fake-volume-id')) self.assertEqual(mock_check_attach.call_args, mock.call( self.context, fake_volume, instance=instance)) mock_reserve_vol.assert_called_once_with( self.context, 'fake-volume-id') a, kw = mock_attach.call_args self.assertEqual(a[2].device_name, '/dev/vdb') self.assertEqual(a[2].volume_id, uuids.volume_id) def test_attach_volume_shelved_offloaded(self): instance = self._create_fake_instance_obj() with test.nested( mock.patch.object(compute_api.API, '_check_attach_and_reserve_volume'), mock.patch.object(cinder.API, 'attach') ) as (mock_attach_and_reserve, mock_attach): self.compute_api._attach_volume_shelved_offloaded( self.context, instance, 'fake-volume-id', '/dev/vdb', 'ide', 'cdrom') mock_attach_and_reserve.assert_called_once_with(self.context, 'fake-volume-id', instance) mock_attach.assert_called_once_with(self.context, 'fake-volume-id', instance.uuid, '/dev/vdb') self.assertTrue(mock_attach.called) def test_attach_volume_no_device(self): called = {} def fake_check_attach(*args, **kwargs): called['fake_check_attach'] = True def fake_reserve_volume(*args, **kwargs): called['fake_reserve_volume'] = True def fake_volume_get(self, context, volume_id): called['fake_volume_get'] = True return {'id': volume_id} def fake_rpc_attach_volume(self, context, instance, bdm): called['fake_rpc_attach_volume'] = True def fake_rpc_reserve_block_device_name(self, context, instance, device, volume_id, **kwargs): called['fake_rpc_reserve_block_device_name'] = True bdm = block_device_obj.BlockDeviceMapping(context=context) bdm['device_name'] = '/dev/vdb' return bdm self.stubs.Set(cinder.API, 'get', fake_volume_get) self.stubs.Set(cinder.API, 'check_attach', fake_check_attach) self.stubs.Set(cinder.API, 'reserve_volume', fake_reserve_volume) self.stubs.Set(compute_rpcapi.ComputeAPI, 
'reserve_block_device_name', fake_rpc_reserve_block_device_name) self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume', fake_rpc_attach_volume) instance = self._create_fake_instance_obj() self.compute_api.attach_volume(self.context, instance, 1, device=None) self.assertTrue(called.get('fake_check_attach')) self.assertTrue(called.get('fake_reserve_volume')) self.assertTrue(called.get('fake_volume_get')) self.assertTrue(called.get('fake_rpc_reserve_block_device_name')) self.assertTrue(called.get('fake_rpc_attach_volume')) def test_detach_volume(self): # Ensure volume can be detached from instance called = {} instance = self._create_fake_instance_obj() # Set attach_status to 'fake' as nothing is reading the value. volume = {'id': 1, 'attach_status': 'fake'} def fake_check_detach(*args, **kwargs): called['fake_check_detach'] = True def fake_begin_detaching(*args, **kwargs): called['fake_begin_detaching'] = True def fake_rpc_detach_volume(self, context, **kwargs): called['fake_rpc_detach_volume'] = True self.stubs.Set(cinder.API, 'check_detach', fake_check_detach) self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching) self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume', fake_rpc_detach_volume) self.compute_api.detach_volume(self.context, instance, volume) self.assertTrue(called.get('fake_check_detach')) self.assertTrue(called.get('fake_begin_detaching')) self.assertTrue(called.get('fake_rpc_detach_volume')) @mock.patch.object(compute_api.API, '_check_and_begin_detach') @mock.patch.object(compute_api.API, '_local_cleanup_bdm_volumes') @mock.patch.object(objects.BlockDeviceMapping, 'get_by_volume_id') def test_detach_volume_shelved_offloaded(self, mock_block_dev, mock_local_cleanup, mock_check_begin_detach): mock_block_dev.return_value = [block_device_obj.BlockDeviceMapping( context=context)] instance = self._create_fake_instance_obj() volume = {'id': 1, 'attach_status': 'fake'} self.compute_api._detach_volume_shelved_offloaded(self.context, instance, volume) mock_check_begin_detach.assert_called_once_with(self.context, volume, instance) self.assertTrue(mock_local_cleanup.called) def test_detach_invalid_volume(self): # Ensure exception is raised while detaching an un-attached volume fake_instance = self._fake_instance({ 'uuid': 'f7000000-0000-0000-0000-000000000001', 'locked': False, 'launched_at': timeutils.utcnow(), 'vm_state': vm_states.ACTIVE, 'task_state': None}) volume = {'id': 1, 'attach_status': 'detached', 'status': 'available'} self.assertRaises(exception.InvalidVolume, self.compute_api.detach_volume, self.context, fake_instance, volume) def test_detach_unattached_volume(self): # Ensure exception is raised when volume's idea of attached # instance doesn't match. 
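        # The volume dict below records an attachment for 'fake_uuid', which
        # does not match fake_instance.uuid, so detach_volume is expected to
        # raise VolumeUnattached.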
fake_instance = self._fake_instance({ 'uuid': 'f7000000-0000-0000-0000-000000000001', 'locked': False, 'launched_at': timeutils.utcnow(), 'vm_state': vm_states.ACTIVE, 'task_state': None}) volume = {'id': 1, 'attach_status': 'attached', 'status': 'in-use', 'attachments': {'fake_uuid': {'attachment_id': 'fakeid'}}} self.assertRaises(exception.VolumeUnattached, self.compute_api.detach_volume, self.context, fake_instance, volume) def test_detach_suspended_instance_fails(self): fake_instance = self._fake_instance({ 'uuid': 'f7000000-0000-0000-0000-000000000001', 'locked': False, 'launched_at': timeutils.utcnow(), 'vm_state': vm_states.SUSPENDED, 'task_state': None}) # Unused volume = {} self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_volume, self.context, fake_instance, volume) def test_detach_volume_libvirt_is_down(self): # Ensure rollback during detach if libvirt goes down called = {} instance = self._create_fake_instance_obj() fake_bdm = fake_block_device.FakeDbBlockDeviceDict( {'device_name': '/dev/vdb', 'volume_id': uuids.volume_id, 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': '{"test": "test"}'}) def fake_libvirt_driver_instance_exists(_instance): called['fake_libvirt_driver_instance_exists'] = True return False def fake_libvirt_driver_detach_volume_fails(*args, **kwargs): called['fake_libvirt_driver_detach_volume_fails'] = True raise AttributeError() def fake_roll_detaching(*args, **kwargs): called['fake_roll_detaching'] = True self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching) self.stubs.Set(self.compute.driver, "instance_exists", fake_libvirt_driver_instance_exists) self.stubs.Set(self.compute.driver, "detach_volume", fake_libvirt_driver_detach_volume_fails) self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'get_by_volume_and_instance') objects.BlockDeviceMapping.get_by_volume_and_instance( self.context, 1, instance.uuid).\ AndReturn(objects.BlockDeviceMapping( context=self.context, **fake_bdm)) self.mox.ReplayAll() self.assertRaises(AttributeError, self.compute.detach_volume, self.context, 1, instance) self.assertTrue(called.get('fake_libvirt_driver_instance_exists')) self.assertTrue(called.get('fake_roll_detaching')) def test_detach_volume_not_found(self): # Ensure that a volume can be detached even when it is removed # from an instance but remaining in bdm. See bug #1367964. 
instance = self._create_fake_instance_obj() fake_bdm = fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-id', 'device_name': '/dev/vdb', 'connection_info': '{"test": "test"}'}) bdm = objects.BlockDeviceMapping(context=self.context, **fake_bdm) # Stub out fake_volume_get so cinder api does not raise exception # and manager gets to call bdm.destroy() def fake_volume_get(self, context, volume_id): return {'id': volume_id} self.stub_out('nova.volume.cinder.API.get', fake_volume_get) with test.nested( mock.patch.object(self.compute.driver, 'detach_volume', side_effect=exception.DiskNotFound('sdb')), mock.patch.object(objects.BlockDeviceMapping, 'get_by_volume_and_instance', return_value=bdm), mock.patch.object(cinder.API, 'terminate_connection'), mock.patch.object(bdm, 'destroy'), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute.volume_api, 'detach'), mock.patch.object(self.compute.driver, 'get_volume_connector', return_value='fake-connector') ) as (mock_detach_volume, mock_volume, mock_terminate_connection, mock_destroy, mock_notify, mock_detach, mock_volume_connector): self.compute.detach_volume(self.context, 'fake-id', instance) self.assertTrue(mock_detach_volume.called) mock_terminate_connection.assert_called_once_with(self.context, 'fake-id', 'fake-connector') mock_destroy.assert_called_once_with() mock_detach.assert_called_once_with(mock.ANY, 'fake-id', instance.uuid, None) def test_terminate_with_volumes(self): # Make sure that volumes get detached during instance termination. admin = context.get_admin_context() instance = self._create_fake_instance_obj() volume_id = 'fake' values = {'instance_uuid': instance['uuid'], 'device_name': '/dev/vdc', 'delete_on_termination': False, 'volume_id': volume_id, 'destination_type': 'volume' } db.block_device_mapping_create(admin, values) def fake_volume_get(self, context, volume_id): return {'id': volume_id} self.stubs.Set(cinder.API, "get", fake_volume_get) # Stub out and record whether it gets detached result = {"detached": False} def fake_detach(self, context, volume_id_param, instance_uuid): result["detached"] = volume_id_param == volume_id self.stubs.Set(cinder.API, "detach", fake_detach) def fake_terminate_connection(self, context, volume_id, connector): return {} self.stubs.Set(cinder.API, "terminate_connection", fake_terminate_connection) # Kill the instance and check that it was detached bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( admin, instance['uuid']) self.compute.terminate_instance(admin, instance, bdms, []) self.assertTrue(result["detached"]) def test_terminate_deletes_all_bdms(self): admin = context.get_admin_context() instance = self._create_fake_instance_obj() img_bdm = {'context': admin, 'instance_uuid': instance['uuid'], 'device_name': '/dev/vda', 'source_type': 'image', 'destination_type': 'local', 'delete_on_termination': False, 'boot_index': 0, 'image_id': 'fake_image'} vol_bdm = {'context': admin, 'instance_uuid': instance['uuid'], 'device_name': '/dev/vdc', 'source_type': 'volume', 'destination_type': 'volume', 'delete_on_termination': False, 'volume_id': 'fake_vol'} bdms = [] for bdm in img_bdm, vol_bdm: bdm_obj = objects.BlockDeviceMapping(**bdm) bdm_obj.create() bdms.append(bdm_obj) self.stub_out('nova.volume.cinder.API.terminate_connection', mox.MockAnything()) self.stub_out('nova.volume.cinder.API.detach', mox.MockAnything()) def fake_volume_get(self, context, volume_id): return {'id': 
volume_id} self.stub_out('nova.volume.cinder.API.get', fake_volume_get) self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything()) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.compute.terminate_instance(self.context, instance, bdms, []) bdms = db.block_device_mapping_get_all_by_instance(admin, instance['uuid']) self.assertEqual(len(bdms), 0) def test_inject_network_info(self): instance = self._create_fake_instance_obj(params={'host': CONF.host}) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance = self.compute_api.get(self.context, instance['uuid'], want_objects=True) self.compute_api.inject_network_info(self.context, instance) def test_reset_network(self): instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance = self.compute_api.get(self.context, instance['uuid'], want_objects=True) self.compute_api.reset_network(self.context, instance) def test_lock(self): instance = self._create_fake_instance_obj() self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance', lambda *a, **kw: None) self.compute_api.lock(self.context, instance) def test_unlock(self): instance = self._create_fake_instance_obj() self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance', lambda *a, **kw: None) self.compute_api.unlock(self.context, instance) def test_add_remove_security_group(self): instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) instance = self.compute_api.get(self.context, instance.uuid, want_objects=True) security_group_name = self._create_group()['name'] self.security_group_api.add_to_instance(self.context, instance, security_group_name) self.security_group_api.remove_from_instance(self.context, instance, security_group_name) def test_get_diagnostics(self): instance = self._create_fake_instance_obj() rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_diagnostics') rpcapi.get_diagnostics(self.context, instance=instance) self.mox.ReplayAll() self.compute_api.get_diagnostics(self.context, instance) def test_get_instance_diagnostics(self): instance = self._create_fake_instance_obj() rpcapi = compute_rpcapi.ComputeAPI self.mox.StubOutWithMock(rpcapi, 'get_instance_diagnostics') rpcapi.get_instance_diagnostics(self.context, instance=instance) self.mox.ReplayAll() self.compute_api.get_instance_diagnostics(self.context, instance) @mock.patch.object(compute_rpcapi.ComputeAPI, 'refresh_instance_security_rules') def test_refresh_instance_security_rules(self, mock_refresh): inst1 = self._create_fake_instance_obj() inst2 = self._create_fake_instance_obj({'host': None}) self.security_group_api._refresh_instance_security_rules( self.context, [inst1, inst2]) mock_refresh.assert_called_once_with(self.context, inst1.host, inst1) @mock.patch.object(compute_rpcapi.ComputeAPI, 'refresh_instance_security_rules') def test_refresh_instance_security_rules_empty(self, mock_refresh): self.security_group_api._refresh_instance_security_rules(self.context, []) self.assertFalse(mock_refresh.called) @mock.patch.object(compute_api.SecurityGroupAPI, '_refresh_instance_security_rules') @mock.patch.object(objects.InstanceList, 'get_by_grantee_security_group_ids') def test_secgroup_refresh(self, mock_get, mock_refresh): mock_get.return_value = mock.sentinel.instances 
self.security_group_api.trigger_members_refresh(mock.sentinel.ctxt, mock.sentinel.ids) mock_get.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.ids) mock_refresh.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.instances) @mock.patch.object(compute_api.SecurityGroupAPI, '_refresh_instance_security_rules') @mock.patch.object(objects.InstanceList, 'get_by_security_group_id') def test_secrule_refresh(self, mock_get, mock_refresh): mock_get.return_value = mock.sentinel.instances self.security_group_api.trigger_rules_refresh(mock.sentinel.ctxt, mock.sentinel.id) mock_get.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.id) mock_refresh.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.instances) def test_live_migrate(self): instance, instance_uuid = self._run_instance() rpcapi = self.compute_api.compute_task_api fake_spec = objects.RequestSpec() @mock.patch.object(rpcapi, 'live_migrate_instance') @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(self.compute_api, '_record_action_start') def do_test(record_action_start, get_by_instance_uuid, live_migrate_instance): get_by_instance_uuid.return_value = fake_spec self.compute_api.live_migrate(self.context, instance, block_migration=True, disk_over_commit=True, host_name='fake_dest_host') record_action_start.assert_called_once_with(self.context, instance, 'live-migration') live_migrate_instance.assert_called_once_with( self.context, instance, 'fake_dest_host', block_migration=True, disk_over_commit=True, request_spec=fake_spec) do_test() instance.refresh() self.assertEqual(instance['task_state'], task_states.MIGRATING) def test_evacuate(self): instance = self._create_fake_instance_obj(services=True) self.assertIsNone(instance.task_state) ctxt = self.context.elevated() fake_spec = objects.RequestSpec() def fake_rebuild_instance(*args, **kwargs): instance.host = kwargs['host'] instance.save() @mock.patch.object(self.compute_api.compute_task_api, 'rebuild_instance') @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up') def do_test(service_is_up, get_by_instance_uuid, rebuild_instance): service_is_up.return_value = False get_by_instance_uuid.return_value = fake_spec rebuild_instance.side_effect = fake_rebuild_instance self.compute_api.evacuate(ctxt, instance, host='fake_dest_host', on_shared_storage=True, admin_password=None) rebuild_instance.assert_called_once_with( ctxt, instance=instance, new_pass=None, injected_files=None, image_ref=None, orig_image_ref=None, orig_sys_metadata=None, bdms=None, recreate=True, on_shared_storage=True, request_spec=fake_spec, host='fake_dest_host') do_test() instance.refresh() self.assertEqual(instance.task_state, task_states.REBUILDING) self.assertEqual(instance.host, 'fake_dest_host') migs = objects.MigrationList.get_by_filters( self.context, {'source_host': 'fake_host'}) self.assertEqual(1, len(migs)) self.assertEqual(self.compute.host, migs[0].source_compute) self.assertEqual('accepted', migs[0].status) self.assertEqual('compute.instance.evacuate', fake_notifier.NOTIFICATIONS[0].event_type) def test_fail_evacuate_from_non_existing_host(self): inst = {} inst['vm_state'] = vm_states.ACTIVE inst['launched_at'] = timeutils.utcnow() inst['image_ref'] = FAKE_IMAGE_REF inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id inst['project_id'] = self.project_id inst['host'] = 'fake_host' inst['node'] = NODENAME type_id = flavors.get_flavor_by_name('m1.tiny')['id'] 
inst['instance_type_id'] = type_id inst['ami_launch_index'] = 0 inst['memory_mb'] = 0 inst['vcpus'] = 0 inst['root_gb'] = 0 inst['ephemeral_gb'] = 0 inst['architecture'] = arch.X86_64 inst['os_type'] = 'Linux' instance = self._create_fake_instance_obj(inst) self.assertIsNone(instance.task_state) self.assertRaises(exception.ComputeHostNotFound, self.compute_api.evacuate, self.context.elevated(), instance, host='fake_dest_host', on_shared_storage=True, admin_password=None) def test_fail_evacuate_from_running_host(self): instance = self._create_fake_instance_obj(services=True) self.assertIsNone(instance.task_state) def fake_service_is_up(*args, **kwargs): return True self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up', fake_service_is_up) self.assertRaises(exception.ComputeServiceInUse, self.compute_api.evacuate, self.context.elevated(), instance, host='fake_dest_host', on_shared_storage=True, admin_password=None) def test_fail_evacuate_instance_in_wrong_state(self): states = [vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.DELETED] instances = [self._create_fake_instance_obj({'vm_state': state}) for state in states] for instance in instances: self.assertRaises(exception.InstanceInvalidState, self.compute_api.evacuate, self.context, instance, host='fake_dest_host', on_shared_storage=True, admin_password=None) def test_get_migrations(self): migration = test_migration.fake_db_migration() filters = {'host': 'host1'} self.mox.StubOutWithMock(db, "migration_get_all_by_filters") db.migration_get_all_by_filters(self.context, filters).AndReturn([migration]) self.mox.ReplayAll() migrations = self.compute_api.get_migrations(self.context, filters) self.assertEqual(1, len(migrations)) self.assertEqual(migrations[0].id, migration['id']) @mock.patch("nova.db.migration_get_in_progress_by_instance") def test_get_migrations_in_progress_by_instance(self, mock_get): migration = test_migration.fake_db_migration(instance_uuid="1234") mock_get.return_value = [migration] db.migration_get_in_progress_by_instance(self.context, "1234") migrations = self.compute_api.get_migrations_in_progress_by_instance( self.context, "1234") self.assertEqual(1, len(migrations)) self.assertEqual(migrations[0].id, migration['id']) @mock.patch("nova.db.migration_get_by_id_and_instance") def test_get_migration_by_id_and_instance(self, mock_get): migration = test_migration.fake_db_migration(instance_uuid="1234") mock_get.return_value = migration db.migration_get_by_id_and_instance( self.context, migration['id'], uuid) res = self.compute_api.get_migration_by_id_and_instance( self.context, migration['id'], "1234") self.assertEqual(res.id, migration['id']) class ComputeAPIIpFilterTestCase(test.NoDBTestCase): '''Verifies the IP filtering in the compute API.''' def setUp(self): super(ComputeAPIIpFilterTestCase, self).setUp() self.compute_api = compute.API() def _get_ip_filtering_instances(self): '''Utility function to get instances for the IP filtering tests.''' info = [{ 'address': 'aa:bb:cc:dd:ee:ff', 'id': 1, 'network': { 'bridge': 'br0', 'id': 1, 'label': 'private', 'subnets': [{ 'cidr': '192.168.0.0/24', 'ips': [{ 'address': '192.168.0.10', 'type': 'fixed' }, { 'address': '192.168.0.11', 'type': 'fixed' }] }] } }, { 'address': 'aa:bb:cc:dd:ee:ff', 'id': 2, 'network': { 'bridge': 'br1', 'id': 2, 'label': 'private', 'subnets': [{ 'cidr': '192.164.0.0/24', 'ips': [{ 'address': '192.164.0.10', 'type': 'fixed' }] }] } }] info1 = 
objects.InstanceInfoCache(network_info=jsonutils.dumps(info)) inst1 = objects.Instance(id=1, info_cache=info1) info[0]['network']['subnets'][0]['ips'][0]['address'] = '192.168.0.20' info[0]['network']['subnets'][0]['ips'][1]['address'] = '192.168.0.21' info[1]['network']['subnets'][0]['ips'][0]['address'] = '192.164.0.20' info2 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info)) inst2 = objects.Instance(id=2, info_cache=info2) return objects.InstanceList(objects=[inst1, inst2]) def test_ip_filtering_no_matches(self): instances = self._get_ip_filtering_instances() insts = self.compute_api._ip_filter(instances, {'ip': '.*30'}, None) self.assertEqual(0, len(insts)) def test_ip_filtering_one_match(self): instances = self._get_ip_filtering_instances() for val in ('192.168.0.10', '192.168.0.1', '192.164.0.10', '.*10'): insts = self.compute_api._ip_filter(instances, {'ip': val}, None) self.assertEqual([1], [i.id for i in insts]) def test_ip_filtering_one_match_limit(self): instances = self._get_ip_filtering_instances() for limit in (None, 1, 2): insts = self.compute_api._ip_filter(instances, {'ip': '.*10'}, limit) self.assertEqual([1], [i.id for i in insts]) def test_ip_filtering_two_matches(self): instances = self._get_ip_filtering_instances() for val in ('192.16', '192.168', '192.164'): insts = self.compute_api._ip_filter(instances, {'ip': val}, None) self.assertEqual([1, 2], [i.id for i in insts]) def test_ip_filtering_two_matches_limit(self): instances = self._get_ip_filtering_instances() # Up to 2 match, based on the passed limit for limit in (None, 1, 2, 3): insts = self.compute_api._ip_filter(instances, {'ip': '192.168.0.*'}, limit) expected_ids = [1, 2] if limit: expected_len = min(limit, len(expected_ids)) expected_ids = expected_ids[:expected_len] self.assertEqual(expected_ids, [inst.id for inst in insts]) def test_ip_filtering_no_limit_to_db(self): c = context.get_admin_context() # Limit is not supplied to the DB when using an IP filter with mock.patch('nova.objects.InstanceList.get_by_filters') as m_get: self.compute_api.get_all(c, search_opts={'ip': '.10'}, limit=1) self.assertEqual(1, m_get.call_count) kwargs = m_get.call_args[1] self.assertIsNone(kwargs['limit']) def test_ip_filtering_pass_limit_to_db(self): c = context.get_admin_context() # No IP filter, verify that the limit is passed with mock.patch('nova.objects.InstanceList.get_by_filters') as m_get: self.compute_api.get_all(c, search_opts={}, limit=1) self.assertEqual(1, m_get.call_count) kwargs = m_get.call_args[1] self.assertEqual(1, kwargs['limit']) def fake_rpc_method(context, method, **kwargs): pass def _create_service_entries(context, values=[['avail_zone1', ['fake_host1', 'fake_host2']], ['avail_zone2', ['fake_host3']]]): for (avail_zone, hosts) in values: for host in hosts: db.service_create(context, {'host': host, 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0}) return values class ComputeAPIAggrTestCase(BaseTestCase): """This is for unit coverage of aggregate-related methods defined in nova.compute.api. 
""" def setUp(self): super(ComputeAPIAggrTestCase, self).setUp() self.api = compute_api.AggregateAPI() self.context = context.get_admin_context() self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method) self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method) def test_aggregate_no_zone(self): # Ensure we can create an aggregate without an availability zone aggr = self.api.create_aggregate(self.context, 'fake_aggregate', None) self.api.delete_aggregate(self.context, aggr.id) db.aggregate_get(self.context.elevated(read_deleted='yes'), aggr.id) self.assertRaises(exception.AggregateNotFound, self.api.delete_aggregate, self.context, aggr.id) def test_check_az_for_aggregate(self): # Ensure all conflict hosts can be returned values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host1 = values[0][1][0] fake_host2 = values[0][1][1] aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host1) aggr1 = self._init_aggregate_with_host(aggr1, None, None, fake_host2) aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host2) aggr2 = self._init_aggregate_with_host(aggr2, None, None, fake_host1) metadata = {'availability_zone': 'another_zone'} self.assertRaises(exception.InvalidAggregateActionUpdate, self.api.update_aggregate, self.context, aggr2.id, metadata) def test_update_aggregate(self): # Ensure metadata can be updated. aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') fake_notifier.NOTIFICATIONS = [] self.api.update_aggregate(self.context, aggr.id, {'name': 'new_fake_aggregate'}) self.assertIsNone(availability_zones._get_cache().get('cache')) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.updateprop.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.updateprop.end') def test_update_aggregate_no_az(self): # Ensure metadata without availability zone can be # updated,even the aggregate contains hosts belong # to another availability zone values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host) aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'name': 'new_fake_aggregate'} fake_notifier.NOTIFICATIONS = [] self.api.update_aggregate(self.context, aggr2.id, metadata) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.updateprop.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.updateprop.end') def test_update_aggregate_az_change(self): # Ensure availability zone can be updated, # when the aggregate is the only one with # availability zone values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host) self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'availability_zone': 'new_fake_zone'} fake_notifier.NOTIFICATIONS = [] self.api.update_aggregate(self.context, aggr1.id, metadata) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 
'aggregate.updatemetadata.end') def test_update_aggregate_az_fails(self): # Ensure aggregate's availability zone can't be updated, # when aggregate has hosts in other availability zone fake_notifier.NOTIFICATIONS = [] values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host) aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'availability_zone': 'another_zone'} self.assertRaises(exception.InvalidAggregateActionUpdate, self.api.update_aggregate, self.context, aggr2.id, metadata) fake_host2 = values[0][1][1] aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3', None, fake_host2) metadata = {'availability_zone': fake_zone} self.api.update_aggregate(self.context, aggr3.id, metadata) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15) msg = fake_notifier.NOTIFICATIONS[13] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.start') msg = fake_notifier.NOTIFICATIONS[14] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.end') aggr4 = self.api.create_aggregate(self.context, 'fake_aggregate', None) metadata = {'availability_zone': ""} self.assertRaises(exception.InvalidAggregateActionUpdate, self.api.update_aggregate, self.context, aggr4.id, metadata) def test_update_aggregate_az_fails_with_nova_az(self): # Ensure aggregate's availability zone can't be updated, # when aggregate has hosts in other availability zone fake_notifier.NOTIFICATIONS = [] values = _create_service_entries(self.context) fake_host = values[0][1][0] self._init_aggregate_with_host(None, 'fake_aggregate1', CONF.default_availability_zone, fake_host) aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'availability_zone': 'another_zone'} self.assertRaises(exception.InvalidAggregateActionUpdate, self.api.update_aggregate, self.context, aggr2.id, metadata) def test_update_aggregate_metadata(self): # Ensure metadata can be updated. 
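        # The second half of this test also checks that sending a metadata key
        # as None removes it: 'foo_key1' is updated to None and the final
        # DictMatches assertion no longer contains it.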
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_zone')
        metadata = {'foo_key1': 'foo_value1',
                    'foo_key2': 'foo_value2',
                    'availability_zone': 'fake_zone'}
        fake_notifier.NOTIFICATIONS = []
        availability_zones._get_cache().add('fake_key', 'fake_value')
        aggr = self.api.update_aggregate_metadata(self.context, aggr.id,
                                                  metadata)
        self.assertIsNone(availability_zones._get_cache().get('fake_key'))
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
        fake_notifier.NOTIFICATIONS = []
        metadata['foo_key1'] = None
        expected_payload_meta_data = {'foo_key1': None,
                                      'foo_key2': 'foo_value2',
                                      'availability_zone': 'fake_zone'}
        expected = self.api.update_aggregate_metadata(self.context,
                                                      aggr.id, metadata)
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
        self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
        self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
        self.assertThat(expected.metadata,
                        matchers.DictMatches(
                            {'availability_zone': 'fake_zone',
                             'foo_key2': 'foo_value2'}))

    def test_update_aggregate_metadata_no_az(self):
        # Ensure metadata without an availability zone can be updated,
        # even if the aggregate contains hosts belonging to another
        # availability zone.
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        self._init_aggregate_with_host(None, 'fake_aggregate1',
                                       fake_zone, fake_host)
        aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                               fake_host)
        metadata = {'foo_key2': 'foo_value3'}
        fake_notifier.NOTIFICATIONS = []
        aggr2 = self.api.update_aggregate_metadata(self.context, aggr2.id,
                                                   metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')
        self.assertThat(aggr2.metadata,
                        matchers.DictMatches({'foo_key2': 'foo_value3'}))

    def test_update_aggregate_metadata_az_change(self):
        # Ensure the availability zone can be updated when the aggregate
        # is the only one with that availability zone.
        values = _create_service_entries(self.context)
        fake_zone = values[0][0]
        fake_host = values[0][1][0]
        aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
                                               fake_zone, fake_host)
        self._init_aggregate_with_host(None, 'fake_aggregate2', None,
                                       fake_host)
        metadata = {'availability_zone': 'new_fake_zone'}
        fake_notifier.NOTIFICATIONS = []
        self.api.update_aggregate_metadata(self.context, aggr1.id, metadata)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'aggregate.updatemetadata.end')

    def test_update_aggregate_az_do_not_replace_existing_metadata(self):
        # Ensure that updating the aggregate's availability zone does not
        # replace the aggregate's existing metadata.
        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
                                         'fake_zone')
        metadata = {'foo_key1': 'foo_value1'}
        aggr = self.api.update_aggregate_metadata(self.context,
aggr.id, metadata) metadata = {'availability_zone': 'new_fake_zone'} aggr = self.api.update_aggregate(self.context, aggr.id, metadata) self.assertThat(aggr.metadata, matchers.DictMatches( {'availability_zone': 'new_fake_zone', 'foo_key1': 'foo_value1'})) def test_update_aggregate_metadata_az_fails(self): # Ensure aggregate's availability zone can't be updated, # when aggregate has hosts in other availability zone fake_notifier.NOTIFICATIONS = [] values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] self._init_aggregate_with_host(None, 'fake_aggregate1', fake_zone, fake_host) aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None, fake_host) metadata = {'availability_zone': 'another_zone'} self.assertRaises(exception.InvalidAggregateActionUpdateMeta, self.api.update_aggregate_metadata, self.context, aggr2.id, metadata) aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3', None, fake_host) metadata = {'availability_zone': fake_zone} self.api.update_aggregate_metadata(self.context, aggr3.id, metadata) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15) msg = fake_notifier.NOTIFICATIONS[13] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.start') msg = fake_notifier.NOTIFICATIONS[14] self.assertEqual(msg.event_type, 'aggregate.updatemetadata.end') aggr4 = self.api.create_aggregate(self.context, 'fake_aggregate', None) metadata = {'availability_zone': ""} self.assertRaises(exception.InvalidAggregateActionUpdateMeta, self.api.update_aggregate_metadata, self.context, aggr4.id, metadata) def test_delete_aggregate(self): # Ensure we can delete an aggregate. fake_notifier.NOTIFICATIONS = [] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.create.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.create.end') fake_notifier.NOTIFICATIONS = [] self.api.delete_aggregate(self.context, aggr.id) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.delete.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.delete.end') db.aggregate_get(self.context.elevated(read_deleted='yes'), aggr.id) self.assertRaises(exception.AggregateNotFound, self.api.delete_aggregate, self.context, aggr.id) def test_delete_non_empty_aggregate(self): # Ensure InvalidAggregateAction is raised when non empty aggregate. _create_service_entries(self.context, [['fake_availability_zone', ['fake_host']]]) aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_availability_zone') self.api.add_host_to_aggregate(self.context, aggr.id, 'fake_host') self.assertRaises(exception.InvalidAggregateActionDelete, self.api.delete_aggregate, self.context, aggr.id) def test_add_host_to_aggregate(self): # Ensure we can add a host to an aggregate. 
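        # Besides updating the aggregate itself, adding a host is expected to
        # call add_aggregate_host on the compute RPC API and to refresh that
        # host's availability zone cache; both collaborators are stubbed or
        # mocked out below.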
values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) def fake_add_aggregate_host(*args, **kwargs): hosts = kwargs["aggregate"].hosts self.assertIn(fake_host, hosts) self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host', fake_add_aggregate_host) self.mox.StubOutWithMock(availability_zones, 'update_host_availability_zone_cache') availability_zones.update_host_availability_zone_cache(self.context, fake_host) self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.addhost.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.addhost.end') self.assertEqual(len(aggr.hosts), 1) def test_add_host_to_aggr_with_no_az(self): values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2', None) aggr_no_az = self.api.add_host_to_aggregate(self.context, aggr_no_az.id, fake_host) self.assertIn(fake_host, aggr.hosts) self.assertIn(fake_host, aggr_no_az.hosts) def test_add_host_to_multi_az(self): # Ensure we can't add a host to different availability zone values = _create_service_entries(self.context) fake_zone = values[0][0] fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) self.assertEqual(len(aggr.hosts), 1) fake_zone2 = "another_zone" aggr2 = self.api.create_aggregate(self.context, 'fake_aggregate2', fake_zone2) self.assertRaises(exception.InvalidAggregateActionAdd, self.api.add_host_to_aggregate, self.context, aggr2.id, fake_host) def test_add_host_to_multi_az_with_nova_agg(self): # Ensure we can't add a host if already existing in an agg with AZ set # to default values = _create_service_entries(self.context) fake_host = values[0][1][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', CONF.default_availability_zone) aggr = self.api.add_host_to_aggregate(self.context, aggr.id, fake_host) self.assertEqual(len(aggr.hosts), 1) fake_zone2 = "another_zone" aggr2 = self.api.create_aggregate(self.context, 'fake_aggregate2', fake_zone2) self.assertRaises(exception.InvalidAggregateActionAdd, self.api.add_host_to_aggregate, self.context, aggr2.id, fake_host) def test_add_host_to_aggregate_multiple(self): # Ensure we can add multiple hosts to an aggregate. values = _create_service_entries(self.context) fake_zone = values[0][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) for host in values[0][1]: aggr = self.api.add_host_to_aggregate(self.context, aggr.id, host) self.assertEqual(len(aggr.hosts), len(values[0][1])) def test_add_host_to_aggregate_raise_not_found(self): # Ensure ComputeHostNotFound is raised when adding invalid host. 
aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') fake_notifier.NOTIFICATIONS = [] self.assertRaises(exception.ComputeHostNotFound, self.api.add_host_to_aggregate, self.context, aggr.id, 'invalid_host') self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id, 'compute.fake-mini') def test_remove_host_from_aggregate_active(self): # Ensure we can remove a host from an aggregate. values = _create_service_entries(self.context) fake_zone = values[0][0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) for host in values[0][1]: aggr = self.api.add_host_to_aggregate(self.context, aggr.id, host) host_to_remove = values[0][1][0] def fake_remove_aggregate_host(*args, **kwargs): hosts = kwargs["aggregate"].hosts self.assertNotIn(host_to_remove, hosts) self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host', fake_remove_aggregate_host) self.mox.StubOutWithMock(availability_zones, 'update_host_availability_zone_cache') availability_zones.update_host_availability_zone_cache(self.context, host_to_remove) self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] expected = self.api.remove_host_from_aggregate(self.context, aggr.id, host_to_remove) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'aggregate.removehost.start') msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.event_type, 'aggregate.removehost.end') self.assertEqual(len(aggr.hosts) - 1, len(expected.hosts)) def test_remove_host_from_aggregate_raise_not_found(self): # Ensure ComputeHostNotFound is raised when removing invalid host. _create_service_entries(self.context, [['fake_zone', ['fake_host']]]) aggr = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') self.assertRaises(exception.ComputeHostNotFound, self.api.remove_host_from_aggregate, self.context, aggr.id, 'invalid_host') def test_aggregate_list(self): aggregate = self.api.create_aggregate(self.context, 'fake_aggregate', 'fake_zone') metadata = {'foo_key1': 'foo_value1', 'foo_key2': 'foo_value2'} meta_aggregate = self.api.create_aggregate(self.context, 'fake_aggregate2', 'fake_zone2') self.api.update_aggregate_metadata(self.context, meta_aggregate.id, metadata) aggregate_list = self.api.get_aggregate_list(self.context) self.assertIn(aggregate.id, map(lambda x: x.id, aggregate_list)) self.assertIn(meta_aggregate.id, map(lambda x: x.id, aggregate_list)) self.assertIn('fake_aggregate', map(lambda x: x.name, aggregate_list)) self.assertIn('fake_aggregate2', map(lambda x: x.name, aggregate_list)) self.assertIn('fake_zone', map(lambda x: x.availability_zone, aggregate_list)) self.assertIn('fake_zone2', map(lambda x: x.availability_zone, aggregate_list)) test_agg_meta = aggregate_list[1].metadata self.assertIn('foo_key1', test_agg_meta) self.assertIn('foo_key2', test_agg_meta) self.assertEqual('foo_value1', test_agg_meta['foo_key1']) self.assertEqual('foo_value2', test_agg_meta['foo_key2']) def test_aggregate_list_with_hosts(self): values = _create_service_entries(self.context) fake_zone = values[0][0] host_aggregate = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) self.api.add_host_to_aggregate(self.context, host_aggregate.id, values[0][1][0]) aggregate_list = self.api.get_aggregate_list(self.context) aggregate = aggregate_list[0] hosts = aggregate.hosts if 'hosts' in aggregate else None self.assertIn(values[0][1][0], hosts) class 
ComputeAPIAggrCallsSchedulerTestCase(test.NoDBTestCase): """This is for making sure that all Aggregate API methods which are updating the aggregates DB table also notifies the Scheduler by using its client. """ def setUp(self): super(ComputeAPIAggrCallsSchedulerTestCase, self).setUp() self.api = compute_api.AggregateAPI() self.context = context.RequestContext('fake', 'fake') @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_create_aggregate(self, update_aggregates): with mock.patch.object(objects.Aggregate, 'create'): agg = self.api.create_aggregate(self.context, 'fake', None) update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_update_aggregate(self, update_aggregates): self.api.is_safe_to_update_az = mock.Mock() agg = objects.Aggregate() with mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg): self.api.update_aggregate(self.context, 1, {}) update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_update_aggregate_metadata(self, update_aggregates): self.api.is_safe_to_update_az = mock.Mock() agg = objects.Aggregate() agg.update_metadata = mock.Mock() with mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg): self.api.update_aggregate_metadata(self.context, 1, {}) update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'delete_aggregate') def test_delete_aggregate(self, delete_aggregate): self.api.is_safe_to_update_az = mock.Mock() agg = objects.Aggregate(hosts=[]) agg.destroy = mock.Mock() with mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg): self.api.delete_aggregate(self.context, 1) delete_aggregate.assert_called_once_with(self.context, agg) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_add_host_to_aggregate(self, update_aggregates): self.api.is_safe_to_update_az = mock.Mock() self.api._update_az_cache_for_host = mock.Mock() agg = objects.Aggregate(name='fake', metadata={}) agg.add_host = mock.Mock() with test.nested( mock.patch.object(objects.Service, 'get_by_compute_host'), mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg)): self.api.add_host_to_aggregate(self.context, 1, 'fakehost') update_aggregates.assert_called_once_with(self.context, [agg]) @mock.patch.object(scheduler_client.SchedulerClient, 'update_aggregates') def test_remove_host_from_aggregate(self, update_aggregates): self.api._update_az_cache_for_host = mock.Mock() agg = objects.Aggregate(name='fake', metadata={}) agg.delete_host = mock.Mock() with test.nested( mock.patch.object(objects.Service, 'get_by_compute_host'), mock.patch.object(objects.Aggregate, 'get_by_id', return_value=agg)): self.api.remove_host_from_aggregate(self.context, 1, 'fakehost') update_aggregates.assert_called_once_with(self.context, [agg]) class ComputeAggrTestCase(BaseTestCase): """This is for unit coverage of aggregate-related methods defined in nova.compute.manager. 
""" def setUp(self): super(ComputeAggrTestCase, self).setUp() self.context = context.get_admin_context() values = {'name': 'test_aggr'} az = {'availability_zone': 'test_zone'} self.aggr = db.aggregate_create(self.context, values, metadata=az) def test_add_aggregate_host(self): def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): fake_driver_add_to_aggregate.called = True return {"foo": "bar"} self.stubs.Set(self.compute.driver, "add_to_aggregate", fake_driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="host", aggregate=jsonutils.to_primitive(self.aggr), slave_info=None) self.assertTrue(fake_driver_add_to_aggregate.called) def test_remove_aggregate_host(self): def fake_driver_remove_from_aggregate(context, aggregate, host, **_ignore): fake_driver_remove_from_aggregate.called = True self.assertEqual("host", host, "host") return {"foo": "bar"} self.stubs.Set(self.compute.driver, "remove_from_aggregate", fake_driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="host", slave_info=None) self.assertTrue(fake_driver_remove_from_aggregate.called) def test_add_aggregate_host_passes_slave_info_to_driver(self): def driver_add_to_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) self.stubs.Set(self.compute.driver, "add_to_aggregate", driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="the_host", slave_info="SLAVE_INFO", aggregate=jsonutils.to_primitive(self.aggr)) def test_remove_from_aggregate_passes_slave_info_to_driver(self): def driver_remove_from_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) self.stubs.Set(self.compute.driver, "remove_from_aggregate", driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="the_host", slave_info="SLAVE_INFO") class ComputePolicyTestCase(BaseTestCase): def setUp(self): super(ComputePolicyTestCase, self).setUp() self.compute_api = compute.API() def test_actions_are_prefixed(self): self.mox.StubOutWithMock(policy, 'enforce') nova.policy.enforce(self.context, 'compute:reboot', {}) self.mox.ReplayAll() compute_api.check_policy(self.context, 'reboot', {}) def test_wrapped_method(self): instance = self._create_fake_instance_obj(params={'host': None, 'cell_name': 'foo'}) # force delete to fail rules = {"compute:delete": [["false:false"]]} self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.delete, self.context, instance) # reset rules to allow deletion rules = {"compute:delete": []} self.policy.set_rules(rules) self.compute_api.delete(self.context, instance) def test_create_fail(self): rules = {"compute:create": [["false:false"]]} self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, '1', '1') def test_create_attach_volume_fail(self): rules = { "compute:create": [], "compute:create:attach_network": [["false:false"]], "compute:create:attach_volume": [], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, '1', '1', 
requested_networks='blah', block_device_mapping='blah') def test_create_attach_network_fail(self): rules = { "compute:create": [], "compute:create:attach_network": [], "compute:create:attach_volume": [["false:false"]], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, '1', '1', requested_networks='blah', block_device_mapping='blah') def test_get_fail(self): instance = self._create_fake_instance_obj() rules = { "compute:get": [["false:false"]], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get, self.context, instance['uuid']) def test_get_all_fail(self): rules = { "compute:get_all": [["false:false"]], } self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get_all, self.context) def test_force_host_fail(self): rules = {"compute:create": [], "compute:create:forced_host": [["role:fake"]], "network:validate_networks": []} self.policy.set_rules(rules) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, None, '1', availability_zone='1', forced_host='1') def test_force_host_pass(self): rules = {"compute:create": [], "compute:create:forced_host": [], "network:validate_networks": []} self.policy.set_rules(rules) self.compute_api.create(self.context, objects.Flavor(id=1, disabled=False, memory_mb=256, vcpus=1, root_gb=1, ephemeral_gb=1, swap=0), image_href=uuids.host_instance, availability_zone='1', forced_host='1') class DisabledInstanceTypesTestCase(BaseTestCase): """Some instance-types are marked 'disabled' which means that they will not show up in customer-facing listings. We do, however, want those instance-types to be available for emergency migrations and for rebuilding of existing instances. One legitimate use of the 'disabled' field would be when phasing out a particular instance-type. We still want customers to be able to use an instance that of the old type, and we want Ops to be able perform migrations against it, but we *don't* want customers building new instances with the phased-out instance-type. 
""" def setUp(self): super(DisabledInstanceTypesTestCase, self).setUp() self.compute_api = compute.API() self.inst_type = flavors.get_default_flavor() def test_can_build_instance_from_visible_instance_type(self): self.inst_type['disabled'] = False # Assert that exception.FlavorNotFound is not raised self.compute_api.create(self.context, self.inst_type, image_href=uuids.image_instance) def test_cannot_build_instance_from_disabled_instance_type(self): self.inst_type['disabled'] = True self.assertRaises(exception.FlavorNotFound, self.compute_api.create, self.context, self.inst_type, None) def test_can_resize_to_visible_instance_type(self): instance = self._create_fake_instance_obj() orig_get_flavor_by_flavor_id =\ flavors.get_flavor_by_flavor_id def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None, read_deleted="yes"): instance_type = orig_get_flavor_by_flavor_id(flavor_id, ctxt, read_deleted) instance_type['disabled'] = False return instance_type self.stubs.Set(flavors, 'get_flavor_by_flavor_id', fake_get_flavor_by_flavor_id) self._stub_migrate_server() self.compute_api.resize(self.context, instance, '4') def test_cannot_resize_to_disabled_instance_type(self): instance = self._create_fake_instance_obj() orig_get_flavor_by_flavor_id = \ flavors.get_flavor_by_flavor_id def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None, read_deleted="yes"): instance_type = orig_get_flavor_by_flavor_id(flavor_id, ctxt, read_deleted) instance_type['disabled'] = True return instance_type self.stubs.Set(flavors, 'get_flavor_by_flavor_id', fake_get_flavor_by_flavor_id) self.assertRaises(exception.FlavorNotFound, self.compute_api.resize, self.context, instance, '4') class ComputeReschedulingTestCase(BaseTestCase): """Tests re-scheduling logic for new build requests.""" def setUp(self): super(ComputeReschedulingTestCase, self).setUp() self.expected_task_state = task_states.SCHEDULING def fake_update(*args, **kwargs): self.updated_task_state = kwargs.get('task_state') self.stubs.Set(self.compute, '_instance_update', fake_update) def _reschedule(self, request_spec=None, filter_properties=None, exc_info=None): if not filter_properties: filter_properties = {} instance = self._create_fake_instance_obj() scheduler_method = self.compute.compute_task_api.resize_instance method_args = (instance, None, dict(filter_properties=filter_properties), {}, None) return self.compute._reschedule(self.context, request_spec, filter_properties, instance, scheduler_method, method_args, self.expected_task_state, exc_info=exc_info) def test_reschedule_no_filter_properties(self): # no filter_properties will disable re-scheduling. self.assertFalse(self._reschedule()) def test_reschedule_no_retry_info(self): # no retry info will also disable re-scheduling. filter_properties = {} self.assertFalse(self._reschedule(filter_properties=filter_properties)) def test_reschedule_no_request_spec(self): # no request spec will also disable re-scheduling. 
retry = dict(num_attempts=1) filter_properties = dict(retry=retry) self.assertFalse(self._reschedule(filter_properties=filter_properties)) def test_reschedule_success(self): retry = dict(num_attempts=1) filter_properties = dict(retry=retry) request_spec = {'num_instances': 1} try: raise test.TestingException("just need an exception") except test.TestingException: exc_info = sys.exc_info() exc_str = traceback.format_exception_only(exc_info[0], exc_info[1]) self.assertTrue(self._reschedule(filter_properties=filter_properties, request_spec=request_spec, exc_info=exc_info)) self.assertEqual(self.updated_task_state, self.expected_task_state) self.assertEqual(exc_str, filter_properties['retry']['exc']) class InnerTestingException(Exception): pass class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase): """Test logic and exception handling around rescheduling prep resize requests """ def setUp(self): super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp() self.instance = self._create_fake_instance_obj() self.instance_uuid = self.instance['uuid'] self.instance_type = flavors.get_flavor_by_name( "m1.tiny") def test_reschedule_resize_or_reraise_called(self): """Verify the rescheduling logic gets called when there is an error during prep_resize. """ inst_obj = self._create_fake_instance_obj() self.mox.StubOutWithMock(self.compute.db, 'migration_create') self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise') self.compute.db.migration_create(mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(test.TestingException("Original")) self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None, inst_obj, mox.IgnoreArg(), self.instance_type, mox.IgnoreArg(), {}, {}) self.mox.ReplayAll() self.compute.prep_resize(self.context, image=None, instance=inst_obj, instance_type=self.instance_type, reservations=[], request_spec={}, filter_properties={}, node=None, clean_shutdown=True) def test_reschedule_fails_with_exception(self): """Original exception should be raised if the _reschedule method raises another exception """ instance = self._create_fake_instance_obj() scheduler_hint = dict(filter_properties={}) method_args = (instance, None, scheduler_hint, self.instance_type, None) self.mox.StubOutWithMock(self.compute, "_reschedule") self.compute._reschedule( self.context, None, None, instance, self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP).AndRaise( InnerTestingException("Inner")) self.mox.ReplayAll() try: raise test.TestingException("Original") except Exception: exc_info = sys.exc_info() self.assertRaises(test.TestingException, self.compute._reschedule_resize_or_reraise, self.context, None, instance, exc_info, self.instance_type, self.none_quotas, {}, {}) def test_reschedule_false(self): """Original exception should be raised if the resize is not rescheduled. 
""" instance = self._create_fake_instance_obj() scheduler_hint = dict(filter_properties={}) method_args = (instance, None, scheduler_hint, self.instance_type, None) self.mox.StubOutWithMock(self.compute, "_reschedule") self.compute._reschedule( self.context, None, None, instance, self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP).AndReturn(False) self.mox.ReplayAll() try: raise test.TestingException("Original") except Exception: exc_info = sys.exc_info() self.assertRaises(test.TestingException, self.compute._reschedule_resize_or_reraise, self.context, None, instance, exc_info, self.instance_type, self.none_quotas, {}, {}) def test_reschedule_true(self): # If rescheduled, the original resize exception should be logged. instance = self._create_fake_instance_obj() scheduler_hint = dict(filter_properties={}) method_args = (instance, None, scheduler_hint, self.instance_type, None) try: raise test.TestingException("Original") except Exception: exc_info = sys.exc_info() self.mox.StubOutWithMock(self.compute, "_reschedule") self.mox.StubOutWithMock(self.compute, "_log_original_error") self.compute._reschedule(self.context, {}, {}, instance, self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP, exc_info).AndReturn(True) self.compute._log_original_error(exc_info, instance.uuid) self.mox.ReplayAll() self.compute._reschedule_resize_or_reraise( self.context, None, instance, exc_info, self.instance_type, self.none_quotas, {}, {}) class ComputeInactiveImageTestCase(BaseTestCase): def setUp(self): super(ComputeInactiveImageTestCase, self).setUp() def fake_show(meh, context, id, **kwargs): return {'id': id, 'name': 'fake_name', 'status': 'deleted', 'min_ram': 0, 'min_disk': 0, 'properties': {'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'something_else': 'meow'}} fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) self.compute_api = compute.API() def test_create_instance_with_deleted_image(self): # Make sure we can't start an instance with a deleted image. 
inst_type = flavors.get_flavor_by_name('m1.tiny') self.assertRaises(exception.ImageNotActive, self.compute_api.create, self.context, inst_type, uuids.image_instance) class EvacuateHostTestCase(BaseTestCase): def setUp(self): super(EvacuateHostTestCase, self).setUp() self.inst = self._create_fake_instance_obj( {'host': 'fake_host_2', 'node': 'fakenode2'}) self.inst.task_state = task_states.REBUILDING self.inst.save() def fake_get_compute_info(context, host): cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename) return cn self.stubs.Set(self.compute, '_get_compute_info', fake_get_compute_info) self.useFixture(fixtures.SpawnIsSynchronousFixture()) def tearDown(self): db.instance_destroy(self.context, self.inst.uuid) super(EvacuateHostTestCase, self).tearDown() def _rebuild(self, on_shared_storage=True, migration=None, send_node=False): network_api = self.compute.network_api ctxt = context.get_admin_context() node = limits = None if send_node: node = NODENAME limits = {} @mock.patch.object(network_api, 'setup_networks_on_host') @mock.patch.object(network_api, 'setup_instance_network_on_host') @mock.patch('nova.context.RequestContext.elevated', return_value=ctxt) def _test_rebuild(mock_context, mock_setup_instance_network_on_host, mock_setup_networks_on_host): orig_image_ref = None image_ref = None injected_files = None bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, self.inst.uuid) self.compute.rebuild_instance( ctxt, self.inst, orig_image_ref, image_ref, injected_files, 'newpass', {}, bdms, recreate=True, on_shared_storage=on_shared_storage, migration=migration, scheduled_node=node, limits=limits) mock_setup_networks_on_host.assert_called_once_with( ctxt, self.inst, self.inst.host) mock_setup_instance_network_on_host.assert_called_once_with( ctxt, self.inst, self.inst.host) _test_rebuild() def test_rebuild_on_host_updated_target(self): """Confirm evacuate scenario updates host and node.""" self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) def fake_get_compute_info(context, host): self.assertTrue(context.is_admin) self.assertEqual('fake-mini', host) cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename) return cn self.stubs.Set(self.compute, '_get_compute_info', fake_get_compute_info) self.mox.ReplayAll() self._rebuild() # Should be on destination host instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['host'], self.compute.host) self.assertEqual(NODENAME, instance['node']) def test_rebuild_on_host_updated_target_node_not_found(self): """Confirm evacuate scenario where compute_node isn't found.""" self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) def fake_get_compute_info(context, host): raise exception.ComputeHostNotFound(host=host) self.stubs.Set(self.compute, '_get_compute_info', fake_get_compute_info) self.mox.ReplayAll() self._rebuild() # Should be on destination host instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['host'], self.compute.host) self.assertIsNone(instance['node']) def test_rebuild_on_host_node_passed(self): patch_get_info = mock.patch.object(self.compute, '_get_compute_info') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_get_info as get_compute_info, patch_on_disk: self._rebuild(send_node=True) self.assertEqual(0, get_compute_info.call_count) # Should be on destination host and node set to what was passed in instance = db.instance_get(self.context, self.inst.id) 
self.assertEqual(instance['host'], self.compute.host) self.assertEqual(instance['node'], NODENAME) def test_rebuild_with_instance_in_stopped_state(self): """Confirm evacuate scenario updates vm_state to stopped if instance is in stopped state """ # Initialize the VM to stopped state db.instance_update(self.context, self.inst.uuid, {"vm_state": vm_states.STOPPED}) self.inst.vm_state = vm_states.STOPPED self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild() # Check the vm state is reset to stopped instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['vm_state'], vm_states.STOPPED) def test_rebuild_with_wrong_shared_storage(self): """Confirm evacuate scenario does not update host.""" self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self.assertRaises(exception.InvalidSharedStorage, lambda: self._rebuild(on_shared_storage=False)) # Should remain on original host instance = db.instance_get(self.context, self.inst.id) self.assertEqual(instance['host'], 'fake_host_2') def test_rebuild_on_remote_host_with_volumes(self): """Confirm that the evacuate scenario does not attempt a driver detach when rebuilding an instance with volumes on a remote host """ values = {'instance_uuid': self.inst.uuid, 'source_type': 'volume', 'device_name': '/dev/vdc', 'delete_on_termination': False, 'volume_id': uuids.volume_id, 'connection_info': '{}'} db.block_device_mapping_create(self.context, values) def fake_volume_get(self, context, volume): return {'id': 'fake_volume_id'} self.stubs.Set(cinder.API, "get", fake_volume_get) # Stub out and record whether it gets detached result = {"detached": False} def fake_detach(self, context, volume, instance_uuid, attachment_id): result["detached"] = volume["id"] == 'fake_volume_id' self.stubs.Set(cinder.API, "detach", fake_detach) def fake_terminate_connection(self, context, volume, connector): return {} self.stubs.Set(cinder.API, "terminate_connection", fake_terminate_connection) # make sure volumes attach, detach are called self.mox.StubOutWithMock(self.compute.volume_api, 'detach') self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg(), mox.IgnoreArg(), None) self.mox.StubOutWithMock(self.compute, '_prep_block_device') self.compute._prep_block_device(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild() self.mox.VerifyAll() # cleanup bdms = db.block_device_mapping_get_all_by_instance(self.context, self.inst.uuid) if not bdms: self.fail('BDM entry for the attached volume is missing') for bdm in bdms: db.block_device_mapping_destroy(self.context, bdm['id']) def test_rebuild_on_host_with_shared_storage(self): """Confirm evacuate scenario on shared storage.""" self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IsA(objects.ImageMeta), mox.IgnoreArg(), 'newpass', network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild() def test_rebuild_on_host_without_shared_storage(self): """Confirm evacuate scenario without shared storage (rebuild from image) """ self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mox.IsA(objects.ImageMeta), mox.IgnoreArg(), 
mox.IsA('newpass'), network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: False) self.mox.ReplayAll() self._rebuild(on_shared_storage=False) def test_rebuild_on_host_instance_exists(self): """Rebuild if instance exists raises an exception.""" db.instance_update(self.context, self.inst.uuid, {"task_state": task_states.SCHEDULING}) self.compute.build_and_run_instance(self.context, self.inst, {}, {}, {}, block_device_mapping=[]) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.assertRaises(exception.InstanceExists, lambda: self._rebuild(on_shared_storage=True)) def test_driver_does_not_support_recreate(self): with mock.patch.dict(self.compute.driver.capabilities, supports_recreate=False): self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.assertRaises(exception.InstanceRecreateNotSupported, lambda: self._rebuild(on_shared_storage=True)) @mock.patch('nova.objects.ImageMeta.from_image_ref') def test_on_shared_storage_not_provided_host_without_shared_storage(self, mock_image_meta): # 'spawn' should be called with the image_meta from the image_ref self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mock_image_meta.return_value, mox.IgnoreArg(), mox.IsA('newpass'), network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: False) self.mox.ReplayAll() self._rebuild(on_shared_storage=None) @mock.patch('nova.objects.Instance.image_meta', new_callable=mock.PropertyMock) def test_on_shared_storage_not_provided_host_with_shared_storage(self, mock_image_meta): # 'spawn' should be called with the image_meta from the instance self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.compute.driver.spawn(mox.IsA(self.context), mox.IsA(objects.Instance), mock_image_meta.return_value, mox.IgnoreArg(), 'newpass', network_info=mox.IgnoreArg(), block_device_info=mox.IgnoreArg()) self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) self.mox.ReplayAll() self._rebuild(on_shared_storage=None) def test_rebuild_migration_passed_in(self): migration = mock.Mock(spec=objects.Migration) patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_spawn, patch_on_disk: self._rebuild(migration=migration) self.assertEqual('done', migration.status) migration.save.assert_called_once_with() def test_rebuild_migration_node_passed_in(self): patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_spawn, patch_on_disk: self._rebuild(send_node=True) migrations = objects.MigrationList.get_in_progress_by_host_and_node( self.context, self.compute.host, NODENAME) self.assertEqual(1, len(migrations)) migration = migrations[0] self.assertEqual("evacuation", migration.migration_type) self.assertEqual("pre-migrating", migration.status) def test_rebuild_migration_claim_fails(self): migration = mock.Mock(spec=objects.Migration) patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) patch_claim = mock.patch.object( self.compute._resource_tracker_dict[NODENAME], 'rebuild_claim', 
side_effect=exception.ComputeResourcesUnavailable(reason="boom")) with patch_spawn, patch_on_disk, patch_claim: self.assertRaises(exception.BuildAbortException, self._rebuild, migration=migration, send_node=True) self.assertEqual("failed", migration.status) migration.save.assert_called_once_with() def test_rebuild_fails_migration_failed(self): migration = mock.Mock(spec=objects.Migration) patch_spawn = mock.patch.object(self.compute.driver, 'spawn') patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) patch_claim = mock.patch.object( self.compute._resource_tracker_dict[NODENAME], 'rebuild_claim') patch_rebuild = mock.patch.object( self.compute, '_do_rebuild_instance_with_claim', side_effect=test.TestingException()) with patch_spawn, patch_on_disk, patch_claim, patch_rebuild: self.assertRaises(test.TestingException, self._rebuild, migration=migration, send_node=True) self.assertEqual("failed", migration.status) migration.save.assert_called_once_with() def test_rebuild_numa_migration_context_honoured(self): numa_topology = ( test_instance_numa_topology.get_fake_obj_numa_topology( self.context)) # NOTE(ndipanov): Make sure that we pass the topology from the context def fake_spawn(context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): self.assertIsNone(instance.numa_topology) self.inst.numa_topology = numa_topology patch_spawn = mock.patch.object(self.compute.driver, 'spawn', side_effect=fake_spawn) patch_on_disk = mock.patch.object( self.compute.driver, 'instance_on_disk', return_value=True) with patch_spawn, patch_on_disk: self._rebuild(send_node=True) self.assertIsNone(self.inst.numa_topology) self.assertIsNone(self.inst.migration_context) class ComputeInjectedFilesTestCase(BaseTestCase): # Test that running instances with injected_files decodes files correctly def setUp(self): super(ComputeInjectedFilesTestCase, self).setUp() self.instance = self._create_fake_instance_obj() self.stubs.Set(self.compute.driver, 'spawn', self._spawn) self.useFixture(fixtures.SpawnIsSynchronousFixture()) def _spawn(self, context, instance, image_meta, injected_files, admin_password, nw_info, block_device_info, db_api=None): self.assertEqual(self.expected, injected_files) def _test(self, injected_files, decoded_files): self.expected = decoded_files self.compute.build_and_run_instance(self.context, self.instance, {}, {}, {}, block_device_mapping=[], injected_files=injected_files) def test_injected_none(self): # test an input of None for injected_files self._test(None, []) def test_injected_empty(self): # test an input of [] for injected_files self._test([], []) def test_injected_success(self): # test with valid b64 encoded content. 
injected_files = [ ('/a/b/c', base64.b64encode(b'foobarbaz')), ('/d/e/f', base64.b64encode(b'seespotrun')), ] decoded_files = [ ('/a/b/c', 'foobarbaz'), ('/d/e/f', 'seespotrun'), ] self._test(injected_files, decoded_files) def test_injected_invalid(self): # test with invalid b64 encoded content injected_files = [ ('/a/b/c', base64.b64encode(b'foobarbaz')), ('/d/e/f', 'seespotrun'), ] self.assertRaises(exception.Base64Exception, self.compute.build_and_run_instance, self.context, self.instance, {}, {}, {}, block_device_mapping=[], injected_files=injected_files) class CheckConfigDriveTestCase(test.NoDBTestCase): # NOTE(sirp): `TestCase` is far too heavyweight for this test, this should # probably derive from a `test.FastTestCase` that omits DB and env # handling def setUp(self): super(CheckConfigDriveTestCase, self).setUp() self.compute_api = compute.API() def _assertCheck(self, expected, config_drive): self.assertEqual(expected, self.compute_api._check_config_drive(config_drive)) def _assertInvalid(self, config_drive): self.assertRaises(exception.ConfigDriveInvalidValue, self.compute_api._check_config_drive, config_drive) def test_config_drive_false_values(self): self._assertCheck('', None) self._assertCheck('', '') self._assertCheck('', 'False') self._assertCheck('', 'f') self._assertCheck('', '0') def test_config_drive_true_values(self): self._assertCheck(True, 'True') self._assertCheck(True, 't') self._assertCheck(True, '1') def test_config_drive_bogus_values_raise(self): self._assertInvalid('asd') self._assertInvalid(uuidutils.generate_uuid()) class CheckRequestedImageTestCase(test.TestCase): def setUp(self): super(CheckRequestedImageTestCase, self).setUp() self.compute_api = compute.API() self.context = context.RequestContext( 'fake_user_id', 'fake_project_id') self.instance_type = flavors.get_default_flavor() self.instance_type['memory_mb'] = 64 self.instance_type['root_gb'] = 1 def test_no_image_specified(self): self.compute_api._check_requested_image(self.context, None, None, self.instance_type, None) def test_image_status_must_be_active(self): image = dict(id='123', status='foo') self.assertRaises(exception.ImageNotActive, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['status'] = 'active' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_image_min_ram_check(self): image = dict(id='123', status='active', min_ram='65') self.assertRaises(exception.FlavorMemoryTooSmall, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['min_ram'] = '64' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_image_min_disk_check(self): image = dict(id='123', status='active', min_disk='2') self.assertRaises(exception.FlavorDiskSmallerThanMinDisk, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['min_disk'] = '1' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_image_too_large(self): image = dict(id='123', status='active', size='1073741825') self.assertRaises(exception.FlavorDiskSmallerThanImage, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) image['size'] = '1073741824' self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def 
test_root_gb_zero_disables_size_check(self): self.instance_type['root_gb'] = 0 image = dict(id='123', status='active', size='1073741825') self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_root_gb_zero_disables_min_disk(self): self.instance_type['root_gb'] = 0 image = dict(id='123', status='active', min_disk='2') self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) def test_config_drive_option(self): image = {'id': 1, 'status': 'active'} image['properties'] = {'img_config_drive': 'optional'} self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) image['properties'] = {'img_config_drive': 'mandatory'} self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, None) image['properties'] = {'img_config_drive': 'bar'} self.assertRaises(exception.InvalidImageConfigDrive, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, None) def test_volume_blockdevicemapping(self): # We should allow a root volume which is larger than the flavor root # disk. # We should allow a root volume created from an image whose min_disk is # larger than the flavor root disk. image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=self.instance_type.root_gb + 1) volume_uuid = str(uuid.uuid4()) root_bdm = block_device_obj.BlockDeviceMapping( source_type='volume', destination_type='volume', volume_id=volume_uuid, volume_size=self.instance_type.root_gb + 1) self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, root_bdm) def test_volume_blockdevicemapping_min_disk(self): # A bdm object volume smaller than the image's min_disk should not be # allowed image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=self.instance_type.root_gb + 1) volume_uuid = str(uuid.uuid4()) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='volume', image_id=image_uuid, volume_id=volume_uuid, volume_size=self.instance_type.root_gb) self.assertRaises(exception.VolumeSmallerThanMinDisk, self.compute_api._check_requested_image, self.context, image_uuid, image, self.instance_type, root_bdm) def test_volume_blockdevicemapping_min_disk_no_size(self): # We should allow a root volume whose size is not given image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=self.instance_type.root_gb) volume_uuid = str(uuid.uuid4()) root_bdm = block_device_obj.BlockDeviceMapping( source_type='volume', destination_type='volume', volume_id=volume_uuid, volume_size=None) self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, root_bdm) def test_image_blockdevicemapping(self): # Test that we can succeed when passing bdms, and the root bdm isn't a # volume image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=self.instance_type.root_gb * units.Gi, min_disk=0) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='local', image_id=image_uuid) self.compute_api._check_requested_image(self.context, image['id'], image, self.instance_type, root_bdm) def test_image_blockdevicemapping_too_big(self): # We should do a size check against flavor if we were passed bdms but # 
the root bdm isn't a volume image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=(self.instance_type.root_gb + 1) * units.Gi, min_disk=0) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='local', image_id=image_uuid) self.assertRaises(exception.FlavorDiskSmallerThanImage, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, root_bdm) def test_image_blockdevicemapping_min_disk(self): # We should do a min_disk check against flavor if we were passed bdms # but the root bdm isn't a volume image_uuid = str(uuid.uuid4()) image = dict(id=image_uuid, status='active', size=0, min_disk=self.instance_type.root_gb + 1) root_bdm = block_device_obj.BlockDeviceMapping( source_type='image', destination_type='local', image_id=image_uuid) self.assertRaises(exception.FlavorDiskSmallerThanMinDisk, self.compute_api._check_requested_image, self.context, image['id'], image, self.instance_type, root_bdm) class ComputeHooksTestCase(test.BaseHookTestCase): def test_delete_instance_has_hook(self): delete_func = compute_manager.ComputeManager._delete_instance self.assert_has_hook('delete_instance', delete_func) def test_create_instance_has_hook(self): create_func = compute_api.API.create self.assert_has_hook('create_instance', create_func) def test_build_instance_has_hook(self): build_instance_func = (compute_manager.ComputeManager. _do_build_and_run_instance) self.assert_has_hook('build_instance', build_instance_func) nova-13.1.4/nova/tests/unit/compute/fake_resource_tracker.py0000664000567000056710000000155313064447152025360 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.compute import resource_tracker class FakeResourceTracker(resource_tracker.ResourceTracker): """Version without a DB requirement.""" def _update(self, context): self._write_ext_resources(self.compute_node) nova-13.1.4/nova/tests/unit/compute/test_resource_tracker.py0000664000567000056710000016503213064447152025434 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
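# NOTE: a minimal sketch (not from the original test suite) of the pattern the
# resource-tracker tests below exercise: build a ResourceTracker around the
# FakeVirtDriver defined in this module, seed it with
# update_available_resource(), then inspect the resulting compute_node.  The
# names used here (FakeVirtDriver, FAKE_VIRT_MEMORY_MB, the 'fakehost' and
# 'fakenode' strings) are the fakes and constants defined further down in this
# module, not new fixtures.
#
#     driver = FakeVirtDriver()
#     tracker = resource_tracker.ResourceTracker('fakehost', driver, 'fakenode')
#     tracker.update_available_resource(context.get_admin_context())
#     # tracker.compute_node should now report FAKE_VIRT_MEMORY_MB, etc.
#
# The actual test cases wrap this pattern in stubs for the nova.db calls
# (see BaseTrackerTestCase.setUp) so that no database is required.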
"""Tests for compute resource tracking.""" import copy import datetime import uuid import mock from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils import six from nova.compute.monitors import base as monitor_base from nova.compute import resource_tracker from nova.compute import resources from nova.compute import task_states from nova.compute import vm_states from nova import context from nova import exception from nova import objects from nova.objects import base as obj_base from nova.objects import fields from nova.objects import pci_device_pool from nova import rpc from nova import test from nova.tests.unit.pci import fakes as pci_fakes from nova.tests import uuidsentinel from nova.virt import driver FAKE_VIRT_MEMORY_MB = 5 FAKE_VIRT_MEMORY_OVERHEAD = 1 FAKE_VIRT_MEMORY_WITH_OVERHEAD = ( FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD) FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) ROOT_GB = 5 EPHEMERAL_GB = 1 FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB FAKE_VIRT_VCPUS = 1 FAKE_VIRT_STATS = {'virt_stat': 10} FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'} FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS) RESOURCE_NAMES = ['vcpu'] CONF = cfg.CONF class UnsupportedVirtDriver(driver.ComputeDriver): """Pretend version of a lame virt driver.""" def __init__(self): super(UnsupportedVirtDriver, self).__init__(None) def get_host_ip_addr(self): return '127.0.0.1' def get_available_resource(self, nodename): # no support for getting resource usage info return {} class FakeVirtDriver(driver.ComputeDriver): def __init__(self, pci_support=False, stats=None, numa_topology=FAKE_VIRT_NUMA_TOPOLOGY): super(FakeVirtDriver, self).__init__(None) self.memory_mb = FAKE_VIRT_MEMORY_MB self.local_gb = FAKE_VIRT_LOCAL_GB self.vcpus = FAKE_VIRT_VCPUS self.numa_topology = numa_topology self.memory_mb_used = 0 self.local_gb_used = 0 self.pci_support = pci_support self.pci_devices = [ { 'label': 'label_8086_0443', 'dev_type': fields.PciDeviceType.SRIOV_VF, 'compute_node_id': 1, 'address': '0000:00:01.1', 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': 1, 'parent_addr': '0000:00:01.0', }, { 'label': 'label_8086_0443', 'dev_type': fields.PciDeviceType.SRIOV_VF, 'compute_node_id': 1, 'address': '0000:00:01.2', 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': 1, 'parent_addr': '0000:00:01.0', }, { 'label': 'label_8086_0443', 'dev_type': fields.PciDeviceType.SRIOV_PF, 'compute_node_id': 1, 'address': '0000:00:01.0', 'product_id': '0443', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': 1, }, { 'label': 'label_8086_0123', 'dev_type': 'type-PCI', 'compute_node_id': 1, 'address': '0000:00:01.0', 'product_id': '0123', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': 1, }, { 'label': 'label_8086_7891', 'dev_type': fields.PciDeviceType.SRIOV_VF, 'compute_node_id': 1, 'address': '0000:00:01.0', 'product_id': '7891', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', 'numa_node': None, 'parent_addr': '0000:08:01.0', }, ] if self.pci_support 
else [] self.pci_stats = [ { 'count': 2, 'vendor_id': '8086', 'product_id': '0443', 'numa_node': 1, 'dev_type': fields.PciDeviceType.SRIOV_VF }, { 'count': 1, 'vendor_id': '8086', 'product_id': '0443', 'numa_node': 1, 'dev_type': fields.PciDeviceType.SRIOV_PF }, { 'count': 1, 'vendor_id': '8086', 'product_id': '7891', 'numa_node': None, 'dev_type': fields.PciDeviceType.SRIOV_VF }, ] if self.pci_support else [] if stats is not None: self.stats = stats def get_host_ip_addr(self): return '127.0.0.1' def get_available_resource(self, nodename): d = { 'vcpus': self.vcpus, 'memory_mb': self.memory_mb, 'local_gb': self.local_gb, 'vcpus_used': 0, 'memory_mb_used': self.memory_mb_used, 'local_gb_used': self.local_gb_used, 'hypervisor_type': 'fake', 'hypervisor_version': 0, 'hypervisor_hostname': 'fakehost', 'cpu_info': '', 'numa_topology': ( self.numa_topology._to_json() if self.numa_topology else None), } if self.pci_support: d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices) if hasattr(self, 'stats'): d['stats'] = self.stats return d def estimate_instance_overhead(self, instance_info): instance_info['memory_mb'] # make sure memory value is present overhead = { 'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD } return overhead # just return a constant value for testing class BaseTestCase(test.TestCase): @mock.patch('stevedore.enabled.EnabledExtensionManager') def setUp(self, _mock_ext_mgr): super(BaseTestCase, self).setUp() self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) self.context = context.get_admin_context() self._set_pci_passthrough_whitelist() self.flags(use_local=True, group='conductor') self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) self._instances = {} self._instance_types = {} self.stubs.Set(objects.InstanceList, 'get_by_host_and_node', self._fake_instance_get_by_host_and_node) self.stubs.Set(self.conductor.db, 'flavor_get', self._fake_flavor_get) self.host = 'fakehost' self.compute = self._create_compute_node() self.updated = False self.deleted = False self.update_call_count = 0 def _set_pci_passthrough_whitelist(self): self.flags(pci_passthrough_whitelist=[ '{"vendor_id": "8086", "product_id": "0443"}', '{"vendor_id": "8086", "product_id": "7891"}']) def _create_compute_node(self, values=None): # This creates a db representation of a compute_node. compute = { "id": 1, "uuid": uuidsentinel.fake_compute_node, "service_id": 1, "host": "fakehost", "vcpus": 1, "memory_mb": 1, "local_gb": 1, "vcpus_used": 1, "memory_mb_used": 1, "local_gb_used": 1, "free_ram_mb": 1, "free_disk_gb": 1, "current_workload": 1, "running_vms": 0, "cpu_info": None, "numa_topology": None, "stats": '{"num_instances": "1"}', "hypervisor_hostname": "fakenode", 'hypervisor_version': 1, 'hypervisor_type': 'fake-hyp', 'disk_available_least': None, 'host_ip': None, 'metrics': None, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'cpu_allocation_ratio': None, 'ram_allocation_ratio': None, 'disk_allocation_ratio': None, } if values: compute.update(values) return compute def _create_compute_node_obj(self, context): # Use the db representation of a compute node returned # by _create_compute_node() to create an equivalent compute # node object. 
compute = self._create_compute_node() compute_obj = objects.ComputeNode() compute_obj = objects.ComputeNode._from_db_object( context, compute_obj, compute) return compute_obj def _create_service(self, host="fakehost", compute=None): if compute: compute = [compute] service = { "id": 1, "host": host, "binary": "nova-compute", "topic": "compute", "compute_node": compute, "report_count": 0, 'disabled': False, 'disabled_reason': None, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'last_seen_up': None, 'forced_down': False, 'version': 0, } return service def _fake_instance_obj(self, stash=True, flavor=None, **kwargs): # Default to an instance ready to resize to or from the same # instance_type flavor = flavor or self._fake_flavor_create() if not isinstance(flavor, objects.Flavor): flavor = objects.Flavor(**flavor) instance_uuid = str(uuid.uuid1()) instance = objects.Instance(context=self.context, uuid=instance_uuid, flavor=flavor) instance.update({ 'vm_state': vm_states.RESIZED, 'task_state': None, 'ephemeral_key_uuid': None, 'os_type': 'Linux', 'project_id': '123456', 'host': None, 'node': None, 'instance_type_id': flavor['id'], 'memory_mb': flavor['memory_mb'], 'vcpus': flavor['vcpus'], 'root_gb': flavor['root_gb'], 'ephemeral_gb': flavor['ephemeral_gb'], 'launched_on': None, 'system_metadata': {}, 'availability_zone': None, 'vm_mode': None, 'reservation_id': None, 'display_name': None, 'default_swap_device': None, 'power_state': None, 'access_ip_v6': None, 'access_ip_v4': None, 'key_name': None, 'updated_at': None, 'cell_name': None, 'locked': None, 'locked_by': None, 'launch_index': None, 'architecture': None, 'auto_disk_config': None, 'terminated_at': None, 'ramdisk_id': None, 'user_data': None, 'cleaned': None, 'deleted_at': None, 'id': 333, 'disable_terminate': None, 'hostname': None, 'display_description': None, 'key_data': None, 'deleted': None, 'default_ephemeral_device': None, 'progress': None, 'launched_at': None, 'config_drive': None, 'kernel_id': None, 'user_id': None, 'shutdown_terminate': None, 'created_at': None, 'image_ref': None, 'root_device_name': None, }) if stash: instance.old_flavor = flavor instance.new_flavor = flavor instance.numa_topology = kwargs.pop('numa_topology', None) instance.update(kwargs) self._instances[instance_uuid] = instance return instance def _fake_flavor_create(self, **kwargs): instance_type = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'disabled': False, 'is_public': True, 'name': 'fakeitype', 'memory_mb': FAKE_VIRT_MEMORY_MB, 'vcpus': FAKE_VIRT_VCPUS, 'root_gb': ROOT_GB, 'ephemeral_gb': EPHEMERAL_GB, 'swap': 0, 'rxtx_factor': 1.0, 'vcpu_weight': 1, 'flavorid': 'fakeflavor', 'extra_specs': {}, } instance_type.update(**kwargs) instance_type = objects.Flavor(**instance_type) id_ = instance_type['id'] self._instance_types[id_] = instance_type return instance_type def _fake_instance_get_by_host_and_node(self, context, host, nodename, expected_attrs=None): return objects.InstanceList( objects=[i for i in self._instances.values() if i['host'] == host]) def _fake_flavor_get(self, ctxt, id_): return self._instance_types[id_] def _fake_compute_node_update(self, ctx, compute_node_id, values, prune_stats=False): self.update_call_count += 1 self.updated = True self.compute.update(values) return self.compute def _driver(self): return FakeVirtDriver() def _tracker(self, host=None): if host is None: host = self.host node = "fakenode" driver = self._driver() tracker = 
resource_tracker.ResourceTracker(host, driver, node) tracker.compute_node = self._create_compute_node_obj(self.context) tracker.ext_resources_handler = \ resources.ResourceHandler(RESOURCE_NAMES, True) return tracker class UnsupportedDriverTestCase(BaseTestCase): """Resource tracking should be disabled when the virt driver doesn't support it. """ def setUp(self): super(UnsupportedDriverTestCase, self).setUp() self.tracker = self._tracker() # seed tracker with data: self.tracker.update_available_resource(self.context) def _driver(self): return UnsupportedVirtDriver() def test_disabled(self): # disabled = no compute node stats self.assertTrue(self.tracker.disabled) self.assertIsNone(self.tracker.compute_node) def test_disabled_claim(self): # basic claim: instance = self._fake_instance_obj() with mock.patch.object(instance, 'save'): claim = self.tracker.instance_claim(self.context, instance) self.assertEqual(0, claim.memory_mb) def test_disabled_instance_claim(self): # instance variation: instance = self._fake_instance_obj() with mock.patch.object(instance, 'save'): claim = self.tracker.instance_claim(self.context, instance) self.assertEqual(0, claim.memory_mb) @mock.patch('nova.objects.Instance.save') def test_disabled_instance_context_claim(self, mock_save): # instance context manager variation: instance = self._fake_instance_obj() self.tracker.instance_claim(self.context, instance) with self.tracker.instance_claim(self.context, instance) as claim: self.assertEqual(0, claim.memory_mb) def test_disabled_updated_usage(self): instance = self._fake_instance_obj(host='fakehost', memory_mb=5, root_gb=10) self.tracker.update_usage(self.context, instance) def test_disabled_resize_claim(self): instance = self._fake_instance_obj() instance_type = self._fake_flavor_create() claim = self.tracker.resize_claim(self.context, instance, instance_type) self.assertEqual(0, claim.memory_mb) self.assertEqual(instance['uuid'], claim.migration['instance_uuid']) self.assertEqual(instance_type['id'], claim.migration['new_instance_type_id']) def test_disabled_resize_context_claim(self): instance = self._fake_instance_obj() instance_type = self._fake_flavor_create() with self.tracker.resize_claim(self.context, instance, instance_type) \ as claim: self.assertEqual(0, claim.memory_mb) class MissingComputeNodeTestCase(BaseTestCase): def setUp(self): super(MissingComputeNodeTestCase, self).setUp() self.tracker = self._tracker() self.stub_out('nova.db.service_get_by_compute_host', self._fake_service_get_by_compute_host) self.stub_out('nova.db.compute_node_get_by_host_and_nodename', self._fake_compute_node_get_by_host_and_nodename) self.stub_out('nova.db.compute_node_create', self._fake_create_compute_node) self.tracker.scheduler_client.update_resource_stats = mock.Mock() def _fake_create_compute_node(self, context, values): self.created = True return self._create_compute_node(values) def _fake_service_get_by_compute_host(self, ctx, host): # return a service with no joined compute service = self._create_service() return service def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename): # return no compute node raise exception.ComputeHostNotFound(host=host) def test_create_compute_node(self): self.tracker.compute_node = None self.tracker.update_available_resource(self.context) self.assertTrue(self.created) def test_enabled(self): self.tracker.update_available_resource(self.context) self.assertFalse(self.tracker.disabled) class BaseTrackerTestCase(BaseTestCase): def setUp(self): # setup plumbing for a 
working resource tracker with required # database models and a compatible compute driver: super(BaseTrackerTestCase, self).setUp() self.tracker = self._tracker() self._migrations = {} self.stub_out('nova.db.service_get_by_compute_host', self._fake_service_get_by_compute_host) self.stub_out('nova.db.compute_node_get_by_host_and_nodename', self._fake_compute_node_get_by_host_and_nodename) self.stub_out('nova.db.compute_node_update', self._fake_compute_node_update) self.stub_out('nova.db.compute_node_delete', self._fake_compute_node_delete) self.stub_out('nova.db.migration_update', self._fake_migration_update) self.stub_out('nova.db.migration_get_in_progress_by_host_and_node', self._fake_migration_get_in_progress_by_host_and_node) # Note that this must be called before the call to _init_tracker() patcher = pci_fakes.fake_pci_whitelist() self.addCleanup(patcher.stop) self._init_tracker() self.limits = self._limits() def _fake_service_get_by_compute_host(self, ctx, host): self.service = self._create_service(host, compute=self.compute) return self.service def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename): self.compute = self._create_compute_node() return self.compute def _fake_compute_node_update(self, ctx, compute_node_id, values, prune_stats=False): self.update_call_count += 1 self.updated = True self.compute.update(values) return self.compute def _fake_compute_node_delete(self, ctx, compute_node_id): self.deleted = True self.compute.update({'deleted': 1}) return self.compute def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host, node): status = ['confirmed', 'reverted', 'error'] migrations = [] for migration in self._migrations.values(): migration = obj_base.obj_to_primitive(migration) if migration['status'] in status: continue uuid = migration['instance_uuid'] migration['instance'] = self._instances[uuid] migrations.append(migration) return migrations def _fake_migration_update(self, ctxt, migration_id, values): # cheat and assume there's only 1 migration present migration = list(self._migrations.values())[0] migration.update(values) return migration def _init_tracker(self): self.tracker.update_available_resource(self.context) def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD, disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS, numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD): """Create limits dictionary used for oversubscribing resources.""" return { 'memory_mb': memory_mb, 'disk_gb': disk_gb, 'vcpu': vcpus, 'numa_topology': numa_topology, } def assertEqualNUMAHostTopology(self, expected, got): attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage') if None in (expected, got): if expected != got: raise AssertionError("Topologies don't match. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) else: return if len(expected) != len(got): raise AssertionError("Topologies don't match due to different " "number of cells. Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) for exp_cell, got_cell in zip(expected.cells, got.cells): for attr in attrs: if getattr(exp_cell, attr) != getattr(got_cell, attr): raise AssertionError("Topologies don't match. 
Expected: " "%(expected)s, but got: %(got)s" % {'expected': expected, 'got': got}) def assertEqualPciDevicePool(self, expected, observed): self.assertEqual(expected.product_id, observed.product_id) self.assertEqual(expected.vendor_id, observed.vendor_id) self.assertEqual(expected.tags, observed.tags) self.assertEqual(expected.count, observed.count) def assertEqualPciDevicePoolList(self, expected, observed): ex_objs = expected.objects ob_objs = observed.objects self.assertEqual(len(ex_objs), len(ob_objs)) for i in range(len(ex_objs)): self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i]) def _assert(self, value, field, tracker=None): if tracker is None: tracker = self.tracker if field not in tracker.compute_node: raise test.TestingException( "'%(field)s' not in compute node." % {'field': field}) x = tracker.compute_node[field] if field == 'numa_topology': self.assertEqualNUMAHostTopology( value, objects.NUMATopology.obj_from_db_obj(x)) else: self.assertEqual(value, x) class TrackerTestCase(BaseTrackerTestCase): def test_free_ram_resource_value(self): driver = FakeVirtDriver() mem_free = driver.memory_mb - driver.memory_mb_used self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb) def test_free_disk_resource_value(self): driver = FakeVirtDriver() mem_free = driver.local_gb - driver.local_gb_used self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb) def test_update_compute_node(self): self.assertFalse(self.tracker.disabled) self.assertTrue(self.updated) def test_init(self): driver = self._driver() self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb') self._assert(FAKE_VIRT_VCPUS, 'vcpus') self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology') self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') self._assert(0, 'running_vms') self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb') self.assertFalse(self.tracker.disabled) self.assertEqual(0, self.tracker.compute_node.current_workload) expected = pci_device_pool.from_pci_stats(driver.pci_stats) self.assertEqual(len(expected), len(self.tracker.compute_node.pci_device_pools)) for expected_pool, actual_pool in zip( expected, self.tracker.compute_node.pci_device_pools): self.assertEqual(expected_pool, actual_pool) def test_set_instance_host_and_node(self): inst = objects.Instance() with mock.patch.object(inst, 'save') as mock_save: self.tracker._set_instance_host_and_node(inst) mock_save.assert_called_once_with() self.assertEqual(self.tracker.host, inst.host) self.assertEqual(self.tracker.nodename, inst.node) self.assertEqual(self.tracker.host, inst.launched_on) def test_unset_instance_host_and_node(self): inst = objects.Instance() with mock.patch.object(inst, 'save') as mock_save: self.tracker._set_instance_host_and_node(inst) self.tracker._unset_instance_host_and_node(inst) self.assertEqual(2, mock_save.call_count) self.assertIsNone(inst.host) self.assertIsNone(inst.node) self.assertEqual(self.tracker.host, inst.launched_on) class SchedulerClientTrackerTestCase(BaseTrackerTestCase): def setUp(self): super(SchedulerClientTrackerTestCase, self).setUp() self.tracker.scheduler_client.update_resource_stats = mock.Mock() def test_update_resource(self): # NOTE(pmurray): we are not doing a full pass through the resource # trackers update path, so safest to do two updates and look for # differences then to rely on the initial state being the same # as an update urs_mock = 
self.tracker.scheduler_client.update_resource_stats self.tracker._update(self.context) urs_mock.reset_mock() # change a compute node value to simulate a change self.tracker.compute_node.local_gb_used += 1 self.tracker._update(self.context) urs_mock.assert_called_once_with(self.tracker.compute_node) def test_no_update_resource(self): # NOTE(pmurray): we are not doing a full pass through the resource # trackers update path, so safest to do two updates and look for # differences then to rely on the initial state being the same # as an update self.tracker._update(self.context) update = self.tracker.scheduler_client.update_resource_stats update.reset_mock() self.tracker._update(self.context) self.assertFalse(update.called, "update_resource_stats should not be " "called when there is no change") class TrackerPciStatsTestCase(BaseTrackerTestCase): def test_update_compute_node(self): self.assertFalse(self.tracker.disabled) self.assertTrue(self.updated) def test_init(self): driver = self._driver() self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb') self._assert(FAKE_VIRT_VCPUS, 'vcpus') self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology') self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') self._assert(0, 'running_vms') self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb') self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb') self.assertFalse(self.tracker.disabled) self.assertEqual(0, self.tracker.compute_node.current_workload) expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats) observed_pools = self.tracker.compute_node.pci_device_pools self.assertEqualPciDevicePoolList(expected_pools, observed_pools) def _driver(self): return FakeVirtDriver(pci_support=True) class TrackerExtraResourcesTestCase(BaseTrackerTestCase): def test_set_empty_ext_resources(self): resources = self._create_compute_node_obj(self.context) del resources.stats self.tracker._write_ext_resources(resources) self.assertEqual({}, resources.stats) def test_set_extra_resources(self): def fake_write_resources(resources): resources['stats']['resA'] = '123' resources['stats']['resB'] = 12 self.stubs.Set(self.tracker.ext_resources_handler, 'write_resources', fake_write_resources) resources = self._create_compute_node_obj(self.context) del resources.stats self.tracker._write_ext_resources(resources) expected = {"resA": "123", "resB": "12"} self.assertEqual(sorted(expected), sorted(resources.stats)) class InstanceClaimTestCase(BaseTrackerTestCase): def _instance_topology(self, mem): mem = mem * 1024 return objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([1]), memory=mem), objects.InstanceNUMACell( id=1, cpuset=set([3]), memory=mem)]) def _claim_topology(self, mem, cpus=1): if self.tracker.driver.numa_topology is None: return None mem = mem * 1024 return objects.NUMATopology( cells=[objects.NUMACell( id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus, memory_usage=mem, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus, memory_usage=mem, mempages=[], siblings=[], pinned_cpus=set([]))]) @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_instance_claim_with_oversubscription(self, mock_get): memory_mb = FAKE_VIRT_MEMORY_MB * 2 root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB vcpus = FAKE_VIRT_VCPUS * 2 claim_topology = self._claim_topology(3) instance_topology = 
self._instance_topology(3)
        limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
                  'disk_gb': root_gb * 2,
                  'vcpu': vcpus,
                  'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}

        instance = self._fake_instance_obj(memory_mb=memory_mb,
                                           root_gb=root_gb,
                                           ephemeral_gb=ephemeral_gb,
                                           numa_topology=instance_topology)

        with mock.patch.object(instance, 'save'):
            self.tracker.instance_claim(self.context, instance, limits)
        self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
                         self.tracker.compute_node.memory_mb_used)
        self.assertEqualNUMAHostTopology(
            claim_topology,
            objects.NUMATopology.obj_from_db_obj(
                self.compute['numa_topology']))
        self.assertEqual(root_gb * 2,
                         self.tracker.compute_node.local_gb_used)

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_additive_claims(self, mock_save, mock_get):
        self.limits['vcpu'] = 2
        claim_topology = self._claim_topology(2, cpus=2)

        flavor = self._fake_flavor_create(
            memory_mb=1, root_gb=1, ephemeral_gb=0)
        instance_topology = self._instance_topology(1)
        instance = self._fake_instance_obj(
            flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance,
                                         self.limits):
            pass
        instance = self._fake_instance_obj(
            flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance,
                                         self.limits):
            pass

        self.assertEqual(
            2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
            self.tracker.compute_node.memory_mb_used)
        self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
                         self.tracker.compute_node.local_gb_used)
        self.assertEqual(2 * flavor['vcpus'],
                         self.tracker.compute_node.vcpus_used)
        self.assertEqualNUMAHostTopology(
            claim_topology,
            objects.NUMATopology.obj_from_db_obj(
                self.compute['numa_topology']))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_context_claim_with_exception(self, mock_save, mock_get):
        instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
                                           ephemeral_gb=1)
        try:
            with self.tracker.instance_claim(self.context, instance):
                raise test.TestingException()
        except test.TestingException:
            pass

        self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
        self.assertEqual(0, self.tracker.compute_node.local_gb_used)
        self.assertEqual(0, self.compute['memory_mb_used'])
        self.assertEqual(0, self.compute['local_gb_used'])
        self.assertEqualNUMAHostTopology(
            FAKE_VIRT_NUMA_TOPOLOGY,
            objects.NUMATopology.obj_from_db_obj(
                self.compute['numa_topology']))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_update_load_stats_for_instance(self, mock_get):
        instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
        with mock.patch.object(instance, 'save'):
            with self.tracker.instance_claim(self.context, instance):
                pass

        self.assertEqual(1, self.tracker.compute_node.current_workload)

        instance['vm_state'] = vm_states.ACTIVE
        instance['task_state'] = None
        instance['host'] = 'fakehost'

        self.tracker.update_usage(self.context, instance)
        self.assertEqual(0, self.tracker.compute_node.current_workload)

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_cpu_stats(self, mock_save, mock_get):
        limits = {'disk_gb': 100, 'memory_mb': 100}
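        # Added note: the limits dict above deliberately has no 'vcpu' key,
        # presumably so the claims in this test are not capped on CPU. The
        # assertions that follow only track how vcpus_used rises with each
        # claim and drops again once an instance is reported as deleted.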
self.assertEqual(0, self.tracker.compute_node.vcpus_used) vcpus = 1 instance = self._fake_instance_obj(vcpus=vcpus) # should not do anything until a claim is made: self.tracker.update_usage(self.context, instance) self.assertEqual(0, self.tracker.compute_node.vcpus_used) with self.tracker.instance_claim(self.context, instance, limits): pass self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) # instance state can change without modifying vcpus in use: instance['task_state'] = task_states.SCHEDULING self.tracker.update_usage(self.context, instance) self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) add_vcpus = 10 vcpus += add_vcpus instance = self._fake_instance_obj(vcpus=add_vcpus) with self.tracker.instance_claim(self.context, instance, limits): pass self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) instance['vm_state'] = vm_states.DELETED self.tracker.update_usage(self.context, instance) vcpus -= add_vcpus self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used) def test_skip_deleted_instances(self): # ensure that the audit process skips instances that have vm_state # DELETED, but the DB record is not yet deleted. self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host) self.tracker.update_available_resource(self.context) self.assertEqual(0, self.tracker.compute_node.memory_mb_used) self.assertEqual(0, self.tracker.compute_node.local_gb_used) @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') def test_deleted_instances_with_migrations(self, mock_migration_list): migration = objects.Migration(context=self.context, migration_type='resize', instance_uuid='invalid') mock_migration_list.return_value = [migration] self.tracker.update_available_resource(self.context) self.assertEqual(0, self.tracker.compute_node.memory_mb_used) self.assertEqual(0, self.tracker.compute_node.local_gb_used) mock_migration_list.assert_called_once_with(self.context, "fakehost", "fakenode") @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') def test_instances_with_live_migrations(self, mock_instance_list, mock_migration_list): instance = self._fake_instance_obj() migration = objects.Migration(context=self.context, migration_type='live-migration', instance_uuid=instance.uuid) mock_migration_list.return_value = [migration] mock_instance_list.return_value = [instance] with mock.patch.object(self.tracker, '_pair_instances_to_migrations' ) as mock_pair: self.tracker.update_available_resource(self.context) self.assertTrue(mock_pair.called) self.assertEqual( instance.uuid, mock_pair.call_args_list[0][0][0][0].instance_uuid) self.assertEqual(instance.uuid, mock_pair.call_args_list[0][0][1][0].uuid) self.assertEqual( ['system_metadata', 'numa_topology', 'flavor', 'migration_context'], mock_instance_list.call_args_list[0][1]['expected_attrs']) self.assertEqual(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD, self.tracker.compute_node['memory_mb_used']) self.assertEqual(ROOT_GB + EPHEMERAL_GB, self.tracker.compute_node['local_gb_used']) mock_migration_list.assert_called_once_with(self.context, "fakehost", "fakenode") def test_pair_instances_to_migrations(self): migrations = [objects.Migration(instance_uuid=uuidsentinel.instance1), objects.Migration(instance_uuid=uuidsentinel.instance2)] instances = [objects.Instance(uuid=uuidsentinel.instance2), objects.Instance(uuid=uuidsentinel.instance1)] self.tracker._pair_instances_to_migrations(migrations, instances) order = 
[uuidsentinel.instance1, uuidsentinel.instance2] for i, migration in enumerate(migrations): self.assertEqual(order[i], migration.instance.uuid) @mock.patch('nova.compute.claims.Claim') @mock.patch('nova.objects.Instance.save') def test_claim_saves_numa_topology(self, mock_save, mock_claim): def fake_save(): self.assertEqual(set(['numa_topology', 'host', 'node', 'launched_on']), inst.obj_what_changed()) mock_save.side_effect = fake_save inst = objects.Instance(host=None, node=None, memory_mb=1024, uuid=uuidsentinel.instance1) inst.obj_reset_changes() numa = objects.InstanceNUMATopology() claim = mock.MagicMock() claim.claimed_numa_topology = numa mock_claim.return_value = claim with mock.patch.object(self.tracker, '_update_usage_from_instance'): self.tracker.instance_claim(self.context, inst) mock_save.assert_called_once_with() @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_claim_sets_instance_host_and_node(self, mock_get): instance = self._fake_instance_obj() self.assertIsNone(instance['host']) self.assertIsNone(instance['launched_on']) self.assertIsNone(instance['node']) with mock.patch.object(instance, 'save'): claim = self.tracker.instance_claim(self.context, instance) self.assertNotEqual(0, claim.memory_mb) self.assertEqual('fakehost', instance['host']) self.assertEqual('fakehost', instance['launched_on']) self.assertEqual('fakenode', instance['node']) class _MoveClaimTestCase(BaseTrackerTestCase): def setUp(self): super(_MoveClaimTestCase, self).setUp() self.instance = self._fake_instance_obj() self.instance_type = self._fake_flavor_create() self.claim_method = self.tracker._move_claim @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_additive_claims(self, mock_get, mock_save): limits = self._limits( 2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 2 * FAKE_VIRT_LOCAL_GB, 2 * FAKE_VIRT_VCPUS) self.claim_method( self.context, self.instance, self.instance_type, limits=limits) mock_save.assert_called_once_with() mock_save.reset_mock() instance2 = self._fake_instance_obj() self.claim_method( self.context, instance2, self.instance_type, limits=limits) mock_save.assert_called_once_with() self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used') self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used') self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used') @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid', return_value=objects.InstancePCIRequests(requests=[])) def test_move_type_not_tracked(self, mock_get, mock_save): self.claim_method(self.context, self.instance, self.instance_type, limits=self.limits, move_type="live-migration") mock_save.assert_called_once_with() self._assert(0, 'memory_mb_used') self._assert(0, 'local_gb_used') self._assert(0, 'vcpus_used') self.assertEqual(0, len(self.tracker.tracked_migrations)) @mock.patch('nova.objects.Instance.save') @mock.patch.object(objects.Migration, 'save') def test_existing_migration(self, save_mock, save_inst_mock): migration = objects.Migration(self.context, id=42, instance_uuid=self.instance.uuid, source_compute='fake-other-compute', source_node='fake-other-node', status='accepted', migration_type='evacuation') self.claim_method(self.context, self.instance, self.instance_type, migration=migration) self.assertEqual(self.tracker.host, migration.dest_compute) 
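        # Added note: the claim should reuse the migration record passed in
        # rather than create a new one, filling in the destination host/node
        # and moving its status to "pre-migrating".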
        self.assertEqual(self.tracker.nodename, migration.dest_node)
        self.assertEqual("pre-migrating", migration.status)
        self.assertEqual(1, len(self.tracker.tracked_migrations))
        save_mock.assert_called_once_with()
        save_inst_mock.assert_called_once_with()


class ResizeClaimTestCase(_MoveClaimTestCase):

    def setUp(self):
        super(ResizeClaimTestCase, self).setUp()
        self.claim_method = self.tracker.resize_claim

    def test_move_type_not_tracked(self):
        self.skipTest("Resize_claim already sets the move_type.")

    def test_existing_migration(self):
        self.skipTest("Resize_claim does not support having an existing "
                      "migration record.")


class OrphanTestCase(BaseTrackerTestCase):
    def _driver(self):
        class OrphanVirtDriver(FakeVirtDriver):
            def get_per_instance_usage(self):
                return {
                    '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '1-2-3-4-5'},
                    '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '2-3-4-5-6'},
                }

        return OrphanVirtDriver()

    def test_usage(self):
        self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                         self.tracker.compute_node.memory_mb_used)

    def test_find(self):
        # create one legit instance and verify the 2 orphans remain
        self._fake_instance_obj()
        orphans = self.tracker._find_orphaned_instances()
        self.assertEqual(2, len(orphans))


class ComputeMonitorTestCase(BaseTestCase):
    def setUp(self):
        super(ComputeMonitorTestCase, self).setUp()
        self.tracker = self._tracker()
        self.node_name = 'nodename'
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.info = {}
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)

    def test_get_host_metrics_none(self):
        self.tracker.monitors = []
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        self.assertEqual(len(metrics), 0)

    @mock.patch.object(resource_tracker.LOG, 'warning')
    def test_get_host_metrics_exception(self, mock_LOG_warning):
        monitor = mock.MagicMock()
        monitor.add_metrics_to_list.side_effect = Exception
        self.tracker.monitors = [monitor]
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        mock_LOG_warning.assert_called_once_with(
            u'Cannot get the metrics from %(mon)s; error: %(exc)s', mock.ANY)
        self.assertEqual(0, len(metrics))

    def test_get_host_metrics(self):
        class FakeCPUMonitor(monitor_base.MonitorBase):

            NOW_TS = timeutils.utcnow()

            def __init__(self, *args):
                super(FakeCPUMonitor, self).__init__(*args)
                self.source = 'FakeCPUMonitor'

            def get_metric_names(self):
                return set(["cpu.frequency"])

            def get_metrics(self):
                return [("cpu.frequency", 100, self.NOW_TS)]

        self.tracker.monitors = [FakeCPUMonitor(None)]
        mock_notifier = mock.Mock()

        with mock.patch.object(rpc, 'get_notifier',
                               return_value=mock_notifier) as mock_get:
            metrics = self.tracker._get_host_metrics(self.context,
                                                     self.node_name)
            mock_get.assert_called_once_with(service='compute',
                                             host=self.node_name)

        expected_metrics = [
            {
                'timestamp': FakeCPUMonitor.NOW_TS.isoformat(),
                'name': 'cpu.frequency',
                'value': 100,
                'source': 'FakeCPUMonitor'
            },
        ]

        payload = {
            'metrics': expected_metrics,
            'host': self.tracker.host,
            'host_ip': CONF.my_ip,
            'nodename': self.node_name
        }

        mock_notifier.info.assert_called_once_with(
            self.context, 'compute.metrics.update', payload)

        self.assertEqual(metrics, expected_metrics)


class TrackerPeriodicTestCase(BaseTrackerTestCase):

    def test_periodic_status_update(self):
        # verify update called on instantiation
        self.assertEqual(1, self.update_call_count)

        # verify update not called if no change to resources
        self.tracker.update_available_resource(self.context)
        self.assertEqual(1, self.update_call_count)

        # verify update is called when resources
change driver = self.tracker.driver driver.memory_mb += 1 self.tracker.update_available_resource(self.context) self.assertEqual(2, self.update_call_count) def test_update_available_resource_calls_locked_inner(self): @mock.patch.object(self.tracker, 'driver') @mock.patch.object(self.tracker, '_update_available_resource') @mock.patch.object(self.tracker, '_verify_resources') @mock.patch.object(self.tracker, '_report_hypervisor_resource_view') def _test(mock_rhrv, mock_vr, mock_uar, mock_driver): resources = {'there is someone in my head': 'but it\'s not me'} mock_driver.get_available_resource.return_value = resources self.tracker.update_available_resource(self.context) mock_uar.assert_called_once_with(self.context, resources) _test() class StatsDictTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides stats as a dictionary. """ def _driver(self): return FakeVirtDriver(stats=FAKE_VIRT_STATS) def test_virt_stats(self): # start with virt driver stats stats = self.tracker.compute_node.stats self.assertEqual(FAKE_VIRT_STATS_COERCED, stats) # adding an instance should keep virt driver stats self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host) self.tracker.update_available_resource(self.context) stats = self.tracker.compute_node.stats # compute node stats are coerced to strings expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED) for k, v in self.tracker.stats.items(): expected_stats[k] = six.text_type(v) self.assertEqual(expected_stats, stats) # removing the instances should keep only virt driver stats self._instances = {} self.tracker.update_available_resource(self.context) stats = self.tracker.compute_node.stats self.assertEqual(FAKE_VIRT_STATS_COERCED, stats) class StatsInvalidTypeTestCase(BaseTrackerTestCase): """Test stats handling for a virt driver that provides an invalid type for stats. 
""" def _driver(self): return FakeVirtDriver(stats=10) def _init_tracker(self): # do not do initial update in setup pass def test_virt_stats(self): # should throw exception for incorrect stats value type self.assertRaises(ValueError, self.tracker.update_available_resource, context=self.context) class UpdateUsageFromInstanceTestCase(BaseTrackerTestCase): @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage') def test_building(self, mock_update_usage): instance = self._fake_instance_obj() instance.vm_state = vm_states.BUILDING self.tracker._update_usage_from_instance(self.context, instance) mock_update_usage.assert_called_once_with(instance, sign=1) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage') def test_shelve_offloading(self, mock_update_usage): instance = self._fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED self.tracker.tracked_instances = {} self.tracker.tracked_instances[ instance.uuid] = obj_base.obj_to_primitive(instance) self.tracker._update_usage_from_instance(self.context, instance) mock_update_usage.assert_called_once_with(instance, sign=-1) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage') def test_unshelving(self, mock_update_usage): instance = self._fake_instance_obj() instance.vm_state = vm_states.SHELVED_OFFLOADED self.tracker._update_usage_from_instance(self.context, instance) mock_update_usage.assert_called_once_with(instance, sign=1) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage') def test_deleted(self, mock_update_usage): instance = self._fake_instance_obj() instance.vm_state = vm_states.DELETED self.tracker.tracked_instances = {} self.tracker.tracked_instances[ instance.uuid] = obj_base.obj_to_primitive(instance) self.tracker._update_usage_from_instance(self.context, instance, True) mock_update_usage.assert_called_once_with(instance, sign=-1) class UpdateUsageFromMigrationsTestCase(BaseTrackerTestCase): @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') def test_no_migrations(self, mock_update_usage): migrations = [] self.tracker._update_usage_from_migrations(self.context, migrations) self.assertFalse(mock_update_usage.called) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_instance_not_found(self, mock_get_instance, mock_update_usage): mock_get_instance.side_effect = exception.InstanceNotFound( instance_id='some_id', ) migration = objects.Migration( context=self.context, instance_uuid='some_uuid', ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, 'some_uuid') self.assertFalse(mock_update_usage.called) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_update_usage_called(self, mock_get_instance, mock_update_usage): instance = self._fake_instance_obj() mock_get_instance.return_value = instance migration = objects.Migration( context=self.context, instance_uuid=instance.uuid, ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, instance.uuid) mock_update_usage.assert_called_once_with( self.context, instance, None, migration) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def 
test_flavor_not_found(self, mock_get_instance, mock_update_usage): mock_update_usage.side_effect = exception.FlavorNotFound(flavor_id='') instance = self._fake_instance_obj() mock_get_instance.return_value = instance migration = objects.Migration( context=self.context, instance_uuid=instance.uuid, ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, instance.uuid) mock_update_usage.assert_called_once_with( self.context, instance, None, migration) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_not_resizing_state(self, mock_get_instance, mock_update_usage): instance = self._fake_instance_obj() instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.SUSPENDING mock_get_instance.return_value = instance migration = objects.Migration( context=self.context, instance_uuid=instance.uuid, ) self.tracker._update_usage_from_migrations(self.context, [migration]) mock_get_instance.assert_called_once_with(self.context, instance.uuid) self.assertFalse(mock_update_usage.called) @mock.patch.object(resource_tracker.ResourceTracker, '_update_usage_from_migration') @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_use_most_recent(self, mock_get_instance, mock_update_usage): instance = self._fake_instance_obj() mock_get_instance.return_value = instance migration_2002 = objects.Migration( id=2002, context=self.context, instance_uuid=instance.uuid, updated_at=datetime.datetime(2002, 1, 1, 0, 0, 0), ) migration_2003 = objects.Migration( id=2003, context=self.context, instance_uuid=instance.uuid, updated_at=datetime.datetime(2003, 1, 1, 0, 0, 0), ) migration_2001 = objects.Migration( id=2001, context=self.context, instance_uuid=instance.uuid, updated_at=datetime.datetime(2001, 1, 1, 0, 0, 0), ) self.tracker._update_usage_from_migrations( self.context, [migration_2002, migration_2003, migration_2001]) mock_get_instance.assert_called_once_with(self.context, instance.uuid) mock_update_usage.assert_called_once_with( self.context, instance, None, migration_2003) nova-13.1.4/nova/tests/unit/compute/eventlet_utils.py0000664000567000056710000000146713064447140024077 0ustar jenkinsjenkins00000000000000# Rackspace Hosting 2014 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet class SyncPool(eventlet.GreenPool): """Synchronous pool for testing threaded code without adding sleep waits. """ def spawn_n(self, func, *args, **kwargs): func(*args, **kwargs) nova-13.1.4/nova/tests/unit/compute/test_virtapi.py0000664000567000056710000001443013064447152023543 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from mox3 import mox from nova.compute import manager as compute_manager from nova import context from nova import db from nova import exception from nova import objects from nova import test from nova.virt import fake from nova.virt import virtapi class VirtAPIBaseTest(test.NoDBTestCase, test.APICoverage): cover_api = virtapi.VirtAPI def setUp(self): super(VirtAPIBaseTest, self).setUp() self.context = context.RequestContext('fake-user', 'fake-project') self.set_up_virtapi() def set_up_virtapi(self): self.virtapi = virtapi.VirtAPI() def assertExpected(self, method, *args, **kwargs): self.assertRaises(NotImplementedError, getattr(self.virtapi, method), self.context, *args, **kwargs) def test_wait_for_instance_event(self): self.assertExpected('wait_for_instance_event', 'instance', ['event']) class FakeVirtAPITest(VirtAPIBaseTest): cover_api = fake.FakeVirtAPI def set_up_virtapi(self): self.virtapi = fake.FakeVirtAPI() def assertExpected(self, method, *args, **kwargs): if method == 'wait_for_instance_event': run = False with self.virtapi.wait_for_instance_event(*args, **kwargs): run = True self.assertTrue(run) return self.mox.StubOutWithMock(db, method) if method in ('aggregate_metadata_add', 'aggregate_metadata_delete', 'security_group_rule_get_by_security_group'): # NOTE(danms): FakeVirtAPI will convert the first argument to # argument['id'], so expect that in the actual db call e_args = tuple([args[0]['id']] + list(args[1:])) elif method == 'security_group_get_by_instance': e_args = tuple([args[0]['uuid']] + list(args[1:])) else: e_args = args getattr(db, method)(self.context, *e_args, **kwargs).AndReturn( 'it worked') self.mox.ReplayAll() result = getattr(self.virtapi, method)(self.context, *args, **kwargs) self.assertEqual(result, 'it worked') class FakeCompute(object): def __init__(self): self.conductor_api = mox.MockAnything() self.db = mox.MockAnything() self._events = [] self.instance_events = mock.MagicMock() self.instance_events.prepare_for_instance_event.side_effect = \ self._prepare_for_instance_event def _event_waiter(self): event = mock.MagicMock() event.status = 'completed' return event def _prepare_for_instance_event(self, instance, event_name): m = mock.MagicMock() m.instance = instance m.event_name = event_name m.wait.side_effect = self._event_waiter self._events.append(m) return m class ComputeVirtAPITest(VirtAPIBaseTest): cover_api = compute_manager.ComputeVirtAPI def set_up_virtapi(self): self.compute = FakeCompute() self.virtapi = compute_manager.ComputeVirtAPI(self.compute) def assertExpected(self, method, *args, **kwargs): self.mox.StubOutWithMock(self.compute.conductor_api, method) getattr(self.compute.conductor_api, method)( self.context, *args, **kwargs).AndReturn('it worked') self.mox.ReplayAll() result = getattr(self.virtapi, method)(self.context, *args, **kwargs) self.assertEqual(result, 'it worked') def test_wait_for_instance_event(self): and_i_ran = '' event_1_tag = objects.InstanceExternalEvent.make_key( 'event1') event_2_tag = objects.InstanceExternalEvent.make_key( 'event2', 'tag') events = { 'event1': event_1_tag, ('event2', 'tag'): event_2_tag, 
} with self.virtapi.wait_for_instance_event('instance', events.keys()): and_i_ran = 'I ran so far a-waa-y' self.assertEqual('I ran so far a-waa-y', and_i_ran) self.assertEqual(2, len(self.compute._events)) for event in self.compute._events: self.assertEqual('instance', event.instance) self.assertIn(event.event_name, events.values()) event.wait.assert_called_once_with() def test_wait_for_instance_event_failed(self): def _failer(): event = mock.MagicMock() event.status = 'failed' return event @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer) def do_test(): with self.virtapi.wait_for_instance_event('instance', ['foo']): pass self.assertRaises(exception.NovaException, do_test) def test_wait_for_instance_event_failed_callback(self): def _failer(): event = mock.MagicMock() event.status = 'failed' return event @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer) def do_test(): callback = mock.MagicMock() with self.virtapi.wait_for_instance_event('instance', ['foo'], error_callback=callback): pass callback.assert_called_with('foo', 'instance') do_test() def test_wait_for_instance_event_timeout(self): class TestException(Exception): pass def _failer(): raise TestException() @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer) @mock.patch('eventlet.timeout.Timeout') def do_test(timeout): with self.virtapi.wait_for_instance_event('instance', ['foo']): pass self.assertRaises(TestException, do_test) nova-13.1.4/nova/tests/unit/compute/test_host_api.py0000664000567000056710000005105413064447152023676 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from oslo_serialization import jsonutils from nova.cells import utils as cells_utils from nova import compute from nova import context from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_notifier from nova.tests.unit.objects import test_objects from nova.tests.unit.objects import test_service class ComputeHostAPITestCase(test.TestCase): def setUp(self): super(ComputeHostAPITestCase, self).setUp() self.host_api = compute.HostAPI() self.ctxt = context.get_admin_context() fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) def _compare_obj(self, obj, db_obj): test_objects.compare_obj(self, obj, db_obj, allow_missing=test_service.OPTIONAL) def _compare_objs(self, obj_list, db_obj_list): for index, obj in enumerate(obj_list): self._compare_obj(obj, db_obj_list[index]) def _mock_rpc_call(self, method, **kwargs): self.mox.StubOutWithMock(self.host_api.rpcapi, method) getattr(self.host_api.rpcapi, method)( self.ctxt, **kwargs).AndReturn('fake-result') def _mock_assert_host_exists(self): """Sets it so that the host API always thinks that 'fake_host' exists. 
""" def fake_assert_host_exists(context, host_name, must_be_up=False): return 'fake_host' self.stubs.Set(self.host_api, '_assert_host_exists', fake_assert_host_exists) def test_set_host_enabled(self): self._mock_assert_host_exists() self._mock_rpc_call('set_host_enabled', host='fake_host', enabled='fake_enabled') self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] result = self.host_api.set_host_enabled(self.ctxt, 'fake_host', 'fake_enabled') self.assertEqual('fake-result', result) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.set_enabled.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.set_enabled.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) def test_host_name_from_assert_hosts_exists(self): self._mock_assert_host_exists() self._mock_rpc_call('set_host_enabled', host='fake_host', enabled='fake_enabled') self.mox.ReplayAll() result = self.host_api.set_host_enabled(self.ctxt, 'fake_hosT', 'fake_enabled') self.assertEqual('fake-result', result) def test_get_host_uptime(self): self._mock_assert_host_exists() self._mock_rpc_call('get_host_uptime', host='fake_host') self.mox.ReplayAll() result = self.host_api.get_host_uptime(self.ctxt, 'fake_host') self.assertEqual('fake-result', result) def test_get_host_uptime_service_down(self): def fake_service_get_by_compute_host(context, host_name): return dict(test_service.fake_service, id=1) self.stubs.Set(self.host_api.db, 'service_get_by_compute_host', fake_service_get_by_compute_host) def fake_service_is_up(service): return False self.stubs.Set(self.host_api.servicegroup_api, 'service_is_up', fake_service_is_up) self.assertRaises(exception.ComputeServiceUnavailable, self.host_api.get_host_uptime, self.ctxt, 'fake_host') def test_host_power_action(self): self._mock_assert_host_exists() self._mock_rpc_call('host_power_action', host='fake_host', action='fake_action') self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] result = self.host_api.host_power_action(self.ctxt, 'fake_host', 'fake_action') self.assertEqual('fake-result', result) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.power_action.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.power_action.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) def test_set_host_maintenance(self): self._mock_assert_host_exists() self._mock_rpc_call('host_maintenance_mode', host='fake_host', host_param='fake_host', mode='fake_mode') self.mox.ReplayAll() fake_notifier.NOTIFICATIONS = [] result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host', 'fake_mode') self.assertEqual('fake-result', result) self.assertEqual(2, 
len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.set_maintenance.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.set_maintenance.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) def test_service_get_all_no_zones(self): services = [dict(test_service.fake_service, id=1, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2')] self.mox.StubOutWithMock(self.host_api.db, 'service_get_all') # Test no filters self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt) self.mox.VerifyAll() self._compare_objs(result, services) # Test no filters #2 self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters={}) self.mox.VerifyAll() self._compare_objs(result, services) # Test w/ filter self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2')) self.mox.VerifyAll() self._compare_objs(result, [services[1]]) def test_service_get_all(self): services = [dict(test_service.fake_service, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2')] exp_services = [] for service in services: exp_service = {} exp_service.update(availability_zone='nova', **service) exp_services.append(exp_service) self.mox.StubOutWithMock(self.host_api.db, 'service_get_all') # Test no filters self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, set_zones=True) self.mox.VerifyAll() self._compare_objs(result, exp_services) # Test no filters #2 self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters={}, set_zones=True) self.mox.VerifyAll() self._compare_objs(result, exp_services) # Test w/ filter self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2'), set_zones=True) self.mox.VerifyAll() self._compare_objs(result, [exp_services[1]]) # Test w/ zone filter but no set_zones arg. 
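        # Added note: filtering on 'availability_zone' is expected to imply
        # zone lookup even without passing set_zones=True, which is why the
        # expected results below still carry the 'nova' availability zone.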
self.mox.ResetAll() self.host_api.db.service_get_all(self.ctxt, disabled=None).AndReturn(services) self.mox.ReplayAll() filters = {'availability_zone': 'nova'} result = self.host_api.service_get_all(self.ctxt, filters=filters) self.mox.VerifyAll() self._compare_objs(result, exp_services) def test_service_get_by_compute_host(self): self.mox.StubOutWithMock(self.host_api.db, 'service_get_by_compute_host') self.host_api.db.service_get_by_compute_host(self.ctxt, 'fake-host').AndReturn(test_service.fake_service) self.mox.ReplayAll() result = self.host_api.service_get_by_compute_host(self.ctxt, 'fake-host') self.assertEqual(test_service.fake_service['id'], result.id) def test_service_update(self): host_name = 'fake-host' binary = 'nova-compute' params_to_update = dict(disabled=True) service_id = 42 expected_result = dict(test_service.fake_service, id=service_id) self.mox.StubOutWithMock(self.host_api.db, 'service_get_by_host_and_binary') self.host_api.db.service_get_by_host_and_binary(self.ctxt, host_name, binary).AndReturn(expected_result) self.mox.StubOutWithMock(self.host_api.db, 'service_update') self.host_api.db.service_update( self.ctxt, service_id, params_to_update).AndReturn(expected_result) self.mox.ReplayAll() result = self.host_api.service_update( self.ctxt, host_name, binary, params_to_update) self._compare_obj(result, expected_result) @mock.patch.object(objects.InstanceList, 'get_by_host', return_value = ['fake-responses']) def test_instance_get_all_by_host(self, mock_get): result = self.host_api.instance_get_all_by_host(self.ctxt, 'fake-host') self.assertEqual(['fake-responses'], result) def test_task_log_get_all(self): self.mox.StubOutWithMock(self.host_api.db, 'task_log_get_all') self.host_api.db.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state').AndReturn('fake-response') self.mox.ReplayAll() result = self.host_api.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state') self.assertEqual('fake-response', result) def test_service_delete(self): with test.nested( mock.patch.object(objects.Service, 'get_by_id', return_value=objects.Service()), mock.patch.object(objects.Service, 'destroy') ) as ( get_by_id, destroy ): self.host_api.service_delete(self.ctxt, 1) get_by_id.assert_called_once_with(self.ctxt, 1) destroy.assert_called_once_with() class ComputeHostAPICellsTestCase(ComputeHostAPITestCase): def setUp(self): self.flags(enable=True, group='cells') self.flags(cell_type='api', group='cells') super(ComputeHostAPICellsTestCase, self).setUp() def _mock_rpc_call(self, method, **kwargs): if 'host_param' in kwargs: kwargs.pop('host_param') else: kwargs.pop('host') rpc_message = { 'method': method, 'namespace': None, 'args': kwargs, 'version': self.host_api.rpcapi.client.target.version, } cells_rpcapi = self.host_api.rpcapi.client.cells_rpcapi self.mox.StubOutWithMock(cells_rpcapi, 'proxy_rpc_to_manager') cells_rpcapi.proxy_rpc_to_manager(self.ctxt, rpc_message, 'compute.fake_host', call=True).AndReturn('fake-result') def test_service_get_all_no_zones(self): services = [ cells_utils.ServiceProxy( objects.Service(id=1, topic='compute', host='host1'), 'cell1'), cells_utils.ServiceProxy( objects.Service(id=2, topic='compute', host='host2'), 'cell1')] fake_filters = {'host': 'host1'} self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_get_all') self.host_api.cells_rpcapi.service_get_all(self.ctxt, filters=fake_filters).AndReturn(services) self.mox.ReplayAll() result = 
self.host_api.service_get_all(self.ctxt, filters=fake_filters) self.assertEqual(services, result) def _test_service_get_all(self, fake_filters, **kwargs): service_attrs = dict(test_service.fake_service) del service_attrs['version'] services = [ cells_utils.ServiceProxy( objects.Service(**dict(service_attrs, id=1, topic='compute', host='host1')), 'cell1'), cells_utils.ServiceProxy( objects.Service(**dict(service_attrs, id=2, topic='compute', host='host2')), 'cell1')] exp_services = [] for service in services: exp_service = copy.copy(service) exp_service.update({'availability_zone': 'nova'}) exp_services.append(exp_service) self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_get_all') self.host_api.cells_rpcapi.service_get_all(self.ctxt, filters=fake_filters).AndReturn(services) self.mox.ReplayAll() result = self.host_api.service_get_all(self.ctxt, filters=fake_filters, **kwargs) self.mox.VerifyAll() self.assertEqual(jsonutils.to_primitive(exp_services), jsonutils.to_primitive(result)) def test_service_get_all(self): fake_filters = {'availability_zone': 'nova'} self._test_service_get_all(fake_filters) def test_service_get_all_set_zones(self): fake_filters = {'key1': 'val1'} self._test_service_get_all(fake_filters, set_zones=True) def test_service_get_by_compute_host(self): self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_get_by_compute_host') obj = objects.Service(id=1, host='fake') fake_service = cells_utils.ServiceProxy(obj, 'cell1') self.host_api.cells_rpcapi.service_get_by_compute_host(self.ctxt, 'fake-host').AndReturn(fake_service) self.mox.ReplayAll() result = self.host_api.service_get_by_compute_host(self.ctxt, 'fake-host') self.assertEqual(fake_service, result) def test_service_update(self): host_name = 'fake-host' binary = 'nova-compute' params_to_update = dict(disabled=True) obj = objects.Service(id=42, host='fake') fake_service = cells_utils.ServiceProxy(obj, 'cell1') self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_update') self.host_api.cells_rpcapi.service_update( self.ctxt, host_name, binary, params_to_update).AndReturn(fake_service) self.mox.ReplayAll() result = self.host_api.service_update( self.ctxt, host_name, binary, params_to_update) self.assertEqual(fake_service, result) def test_service_delete(self): cell_service_id = cells_utils.cell_with_item('cell1', 1) with mock.patch.object(self.host_api.cells_rpcapi, 'service_delete') as service_delete: self.host_api.service_delete(self.ctxt, cell_service_id) service_delete.assert_called_once_with( self.ctxt, cell_service_id) @mock.patch.object(objects.InstanceList, 'get_by_host') def test_instance_get_all_by_host(self, mock_get): instances = [dict(id=1, cell_name='cell1', host='host1'), dict(id=2, cell_name='cell2', host='host1'), dict(id=3, cell_name='cell1', host='host2')] mock_get.return_value = instances expected_result = [instances[0], instances[2]] cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host') result = self.host_api.instance_get_all_by_host(self.ctxt, cell_and_host) self.assertEqual(expected_result, result) def test_task_log_get_all(self): self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'task_log_get_all') self.host_api.cells_rpcapi.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state').AndReturn('fake-response') self.mox.ReplayAll() result = self.host_api.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state') self.assertEqual('fake-response', result) def 
test_get_host_uptime_service_down(self): # The corresponding Compute test case depends on the # _assert_host_exists which is a no-op in the cells api pass def test_get_host_uptime(self): self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'get_host_uptime') self.host_api.cells_rpcapi.get_host_uptime(self.ctxt, 'fake-host'). \ AndReturn('fake-response') self.mox.ReplayAll() result = self.host_api.get_host_uptime(self.ctxt, 'fake-host') self.assertEqual('fake-response', result) nova-13.1.4/nova/tests/unit/compute/test_compute_xen.py0000664000567000056710000000531313064447152024413 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for expectations of behaviour from the Xen driver.""" from oslo_utils import importutils from nova.compute import power_state import nova.conf from nova import context from nova import objects from nova.objects import instance as instance_obj from nova.tests.unit.compute import eventlet_utils from nova.tests.unit import fake_instance from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import vm_utils CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') class ComputeXenTestCase(stubs.XenAPITestBaseNoDB): def setUp(self): super(ComputeXenTestCase, self).setUp() self.flags(compute_driver='xenapi.XenAPIDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.compute = importutils.import_object(CONF.compute_manager) # execute power syncing synchronously for testing: self.compute._sync_power_pool = eventlet_utils.SyncPool() def test_sync_power_states_instance_not_found(self): db_instance = fake_instance.fake_db_instance() ctxt = context.get_admin_context() instance_list = instance_obj._make_instance_list(ctxt, objects.InstanceList(), [db_instance], None) instance = instance_list[0] self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host') self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances') self.mox.StubOutWithMock(vm_utils, 'lookup') self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') objects.InstanceList.get_by_host(ctxt, self.compute.host, expected_attrs=[], use_slave=True).AndReturn(instance_list) self.compute.driver.get_num_instances().AndReturn(1) vm_utils.lookup(self.compute.driver._session, instance['name'], False).AndReturn(None) self.compute._sync_instance_power_state(ctxt, instance, power_state.NOSTATE) self.mox.ReplayAll() self.compute._sync_power_states(ctxt) nova-13.1.4/nova/tests/unit/compute/test_compute_api.py0000664000567000056710000051667613064447152024415 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for compute API.""" import copy import datetime import iso8601 import mock from mox3 import mox from oslo_messaging import exceptions as oslo_exceptions from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from oslo_utils import uuidutils from nova.compute import api as compute_api from nova.compute import arch from nova.compute import cells_api as compute_cells_api from nova.compute import flavors from nova.compute import instance_actions from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_mode from nova.compute import vm_states from nova import conductor from nova import context from nova import db from nova import exception from nova import objects from nova.objects import base as obj_base from nova.objects import fields as fields_obj from nova.objects import quotas as quotas_obj from nova import policy from nova import quota from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_volume from nova.tests.unit.image import fake as fake_image from nova.tests.unit import matchers from nova.tests.unit.objects import test_flavor from nova.tests.unit.objects import test_migration from nova.tests.unit.objects import test_service from nova.tests import uuidsentinel as uuids from nova import utils from nova.volume import cinder FAKE_IMAGE_REF = 'fake-image-ref' NODENAME = 'fakenode1' SHELVED_IMAGE = 'fake-shelved-image' SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound' SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized' SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception' class _ComputeAPIUnitTestMixIn(object): def setUp(self): super(_ComputeAPIUnitTestMixIn, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.compute_api = compute_api.API() self.context = context.RequestContext(self.user_id, self.project_id) def _get_vm_states(self, exclude_states=None): vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]) if not exclude_states: exclude_states = set() return vm_state - exclude_states def _create_flavor(self, **updates): flavor = {'id': 1, 'flavorid': 1, 'name': 'm1.tiny', 'memory_mb': 512, 'vcpus': 1, 'vcpu_weight': None, 'root_gb': 1, 'ephemeral_gb': 0, 'rxtx_factor': 1, 'swap': 0, 'deleted': 0, 'disabled': False, 'is_public': True, 'deleted_at': None, 'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329), 'updated_at': None, } if updates: flavor.update(updates) return objects.Flavor._from_db_object(self.context, objects.Flavor(), flavor) def _create_instance_obj(self, params=None, flavor=None): """Create a test instance.""" if not params: params = {} if flavor is None: flavor = self._create_flavor() now = 
timeutils.utcnow() instance = objects.Instance() instance.metadata = {} instance.metadata.update(params.pop('metadata', {})) instance.system_metadata = params.pop('system_metadata', {}) instance._context = self.context instance.id = 1 instance.uuid = uuidutils.generate_uuid() instance.cell_name = 'api!child' instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.image_ref = FAKE_IMAGE_REF instance.reservation_id = 'r-fakeres' instance.user_id = self.user_id instance.project_id = self.project_id instance.host = 'fake_host' instance.node = NODENAME instance.instance_type_id = flavor.id instance.ami_launch_index = 0 instance.memory_mb = 0 instance.vcpus = 0 instance.root_gb = 0 instance.ephemeral_gb = 0 instance.architecture = arch.X86_64 instance.os_type = 'Linux' instance.locked = False instance.created_at = now instance.updated_at = now instance.launched_at = now instance.disable_terminate = False instance.info_cache = objects.InstanceInfoCache() instance.flavor = flavor instance.old_flavor = instance.new_flavor = None if params: instance.update(params) instance.obj_reset_changes() return instance def _obj_to_list_obj(self, list_obj, obj): list_obj.objects = [] list_obj.objects.append(obj) list_obj._context = self.context list_obj.obj_reset_changes() return list_obj def test_create_quota_exceeded_messages(self): image_href = "image_href" image_id = 0 instance_type = self._create_flavor() self.mox.StubOutWithMock(self.compute_api, "_get_image") self.mox.StubOutWithMock(quota.QUOTAS, "limit_check") self.mox.StubOutWithMock(quota.QUOTAS, "reserve") quotas = {'instances': 1, 'cores': 1, 'ram': 1} usages = {r: {'in_use': 1, 'reserved': 1} for r in ['instances', 'cores', 'ram']} quota_exception = exception.OverQuota(quotas=quotas, usages=usages, overs=['instances']) for _unused in range(2): self.compute_api._get_image(self.context, image_href).AndReturn( (image_id, {})) quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()) quota.QUOTAS.reserve(self.context, instances=40, cores=mox.IsA(int), expire=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg(), ram=mox.IsA(int)).AndRaise(quota_exception) self.mox.ReplayAll() for min_count, message in [(20, '20-40'), (40, '40')]: try: self.compute_api.create(self.context, instance_type, "image_href", min_count=min_count, max_count=40) except exception.TooManyInstances as e: self.assertEqual(message, e.kwargs['req']) else: self.fail("Exception not raised") def _test_create_max_net_count(self, max_net_count, min_count, max_count): with test.nested( mock.patch.object(self.compute_api, '_get_image', return_value=(None, {})), mock.patch.object(self.compute_api, '_check_auto_disk_config'), mock.patch.object(self.compute_api, '_validate_and_build_base_options', return_value=({}, max_net_count)) ) as ( get_image, check_auto_disk_config, validate_and_build_base_options ): self.assertRaises(exception.PortLimitExceeded, self.compute_api.create, self.context, 'fake_flavor', 'image_id', min_count=min_count, max_count=max_count) def test_max_net_count_zero(self): # Test when max_net_count is zero. max_net_count = 0 min_count = 2 max_count = 3 self._test_create_max_net_count(max_net_count, min_count, max_count) def test_max_net_count_less_than_min_count(self): # Test when max_net_count is nonzero but less than min_count. 
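# The mox-based tests in this class (test_create_quota_exceeded_messages above is a
# typical one) all follow mox3's record/replay cycle: StubOutWithMock() swaps the
# real attribute for a recorder, the expected calls are recorded with
# .AndReturn()/.AndRaise(), ReplayAll() switches every mock to playback, and
# VerifyAll()/UnsetStubs() (driven by the test base class here) check that everything
# recorded actually happened.  A minimal, self-contained sketch of that cycle;
# ImageService is a hypothetical class used only for illustration, not Nova code:
from mox3 import mox as mox_lib

class ImageService(object):
    def show(self, image_id):
        raise RuntimeError('the real backend must never be hit in a test')

m = mox_lib.Mox()
service = ImageService()
m.StubOutWithMock(service, 'show')
service.show('fake-image-id').AndReturn({'id': 'fake-image-id'})   # record phase
m.ReplayAll()                                                      # switch to playback
assert service.show('fake-image-id') == {'id': 'fake-image-id'}    # replayed call
m.VerifyAll()        # fails if any recorded call was never made
m.UnsetStubs()       # restore the real ImageService.show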
max_net_count = 1 min_count = 2 max_count = 3 self._test_create_max_net_count(max_net_count, min_count, max_count) def test_specified_port_and_multiple_instances_neutronv2(self): # Tests that if port is specified there is only one instance booting # (i.e max_count == 1) as we can't share the same port across multiple # instances. self.flags(use_neutron=True) port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '10.0.0.1' min_count = 1 max_count = 2 requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(address=address, port_id=port)]) self.assertRaises(exception.MultiplePortsNotApplicable, self.compute_api.create, self.context, 'fake_flavor', 'image_id', min_count=min_count, max_count=max_count, requested_networks=requested_networks) def _test_specified_ip_and_multiple_instances_helper(self, requested_networks): # Tests that if ip is specified there is only one instance booting # (i.e max_count == 1) min_count = 1 max_count = 2 self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest, self.compute_api.create, self.context, "fake_flavor", 'image_id', min_count=min_count, max_count=max_count, requested_networks=requested_networks) def test_specified_ip_and_multiple_instances(self): network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '10.0.0.1' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=network, address=address)]) self._test_specified_ip_and_multiple_instances_helper( requested_networks) def test_specified_ip_and_multiple_instances_neutronv2(self): self.flags(use_neutron=True) network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '10.0.0.1' requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest(network_id=network, address=address)]) self._test_specified_ip_and_multiple_instances_helper( requested_networks) @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name') def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve): bdm = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 1, })) mock_reserve.return_value = bdm instance = self._create_instance_obj() result = self.compute_api._create_volume_bdm(self.context, instance, 'vda', '1', None, None) self.assertTrue(mock_reserve.called) self.assertEqual(result, bdm) @mock.patch.object(objects.BlockDeviceMapping, 'create') def test_create_volume_bdm_local_creation(self, bdm_create): instance = self._create_instance_obj() volume_id = 'fake-vol-id' bdm = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'instance_uuid': instance.uuid, 'volume_id': volume_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': None, 'disk_bus': None, 'device_type': None })) result = self.compute_api._create_volume_bdm(self.context, instance, '/dev/vda', volume_id, None, None, is_local_creation=True) self.assertEqual(result.instance_uuid, bdm.instance_uuid) self.assertIsNone(result.device_name) self.assertEqual(result.volume_id, bdm.volume_id) self.assertTrue(bdm_create.called) @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name') @mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume') def test_attach_volume(self, mock_attach, mock_reserve): instance = self._create_instance_obj() volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol', None, None, None, None, None) fake_bdm = 
mock.MagicMock(spec=objects.BlockDeviceMapping) mock_reserve.return_value = fake_bdm mock_volume_api = mock.patch.object(self.compute_api, 'volume_api', mock.MagicMock(spec=cinder.API)) with mock_volume_api as mock_v_api: mock_v_api.get.return_value = volume self.compute_api.attach_volume( self.context, instance, volume['id']) mock_v_api.check_attach.assert_called_once_with(self.context, volume, instance=instance) mock_v_api.reserve_volume.assert_called_once_with(self.context, volume['id']) mock_attach.assert_called_once_with(self.context, instance, fake_bdm) @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name') @mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume') def test_attach_volume_reserve_fails(self, mock_attach, mock_reserve): instance = self._create_instance_obj() volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol', None, None, None, None, None) fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping) mock_reserve.return_value = fake_bdm mock_volume_api = mock.patch.object(self.compute_api, 'volume_api', mock.MagicMock(spec=cinder.API)) with mock_volume_api as mock_v_api: mock_v_api.get.return_value = volume mock_v_api.reserve_volume.side_effect = test.TestingException() self.assertRaises(test.TestingException, self.compute_api.attach_volume, self.context, instance, volume['id']) mock_v_api.check_attach.assert_called_once_with(self.context, volume, instance=instance) mock_v_api.reserve_volume.assert_called_once_with(self.context, volume['id']) self.assertEqual(0, mock_attach.call_count) fake_bdm.destroy.assert_called_once_with() def test_suspend(self): # Ensure instance can be suspended. instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertIsNone(instance.task_state) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'suspend_instance') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.SUSPEND) rpcapi.suspend_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.suspend(self.context, instance) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertEqual(task_states.SUSPENDING, instance.task_state) def _test_suspend_fails(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertIsNone(instance.task_state) self.assertRaises(exception.InstanceInvalidState, self.compute_api.suspend, self.context, instance) def test_suspend_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE])) for state in invalid_vm_states: self._test_suspend_fails(state) def test_resume(self): # Ensure instance can be resumed (if suspended). 
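# test_attach_volume and test_attach_volume_reserve_fails above replace
# compute_api.volume_api with mock.MagicMock(spec=cinder.API), so only attribute
# names that exist on the real Cinder client may be touched, and then assert on the
# recorded calls.  A small self-contained sketch of that spec'd-mock pattern; the
# Volumes and Cloud classes are hypothetical stand-ins, not Nova code:
import mock

class Volumes(object):
    def reserve_volume(self, context, volume_id):
        pass

class Cloud(object):
    def __init__(self):
        self.volume_api = Volumes()

cloud = Cloud()
with mock.patch.object(cloud, 'volume_api',
                       mock.MagicMock(spec=Volumes)) as vol_api:
    cloud.volume_api.reserve_volume('ctxt', 'vol-1')
    vol_api.reserve_volume.assert_called_once_with('ctxt', 'vol-1')
    # vol_api.release_volume(...) would raise AttributeError: spec=Volumes only
    # exposes attributes the real class has, catching typo'd method names.
# outside the block cloud.volume_api is the real Volumes instance again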
instance = self._create_instance_obj( params=dict(vm_state=vm_states.SUSPENDED)) self.assertEqual(instance.vm_state, vm_states.SUSPENDED) self.assertIsNone(instance.task_state) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'resume_instance') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.RESUME) rpcapi.resume_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.resume(self.context, instance) self.assertEqual(vm_states.SUSPENDED, instance.vm_state) self.assertEqual(task_states.RESUMING, instance.task_state) def test_start(self): params = dict(vm_state=vm_states.STOPPED) instance = self._create_instance_obj(params=params) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.START) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'start_instance') rpcapi.start_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.start(self.context, instance) self.assertEqual(task_states.POWERING_ON, instance.task_state) def test_start_invalid_state(self): instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertRaises(exception.InstanceInvalidState, self.compute_api.start, self.context, instance) def test_start_no_host(self): params = dict(vm_state=vm_states.STOPPED, host='') instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.start, self.context, instance) def _test_stop(self, vm_state, force=False, clean_shutdown=True): # Make sure 'progress' gets reset params = dict(task_state=None, progress=99, vm_state=vm_state) instance = self._create_instance_obj(params=params) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.STOP) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'stop_instance') rpcapi.stop_instance(self.context, instance, do_cast=True, clean_shutdown=clean_shutdown) self.mox.ReplayAll() if force: self.compute_api.force_stop(self.context, instance, clean_shutdown=clean_shutdown) else: self.compute_api.stop(self.context, instance, clean_shutdown=clean_shutdown) self.assertEqual(task_states.POWERING_OFF, instance.task_state) self.assertEqual(0, instance.progress) def test_stop(self): self._test_stop(vm_states.ACTIVE) def test_stop_stopped_instance_with_bypass(self): self._test_stop(vm_states.STOPPED, force=True) def test_stop_forced_shutdown(self): self._test_stop(vm_states.ACTIVE, force=True) def test_stop_without_clean_shutdown(self): self._test_stop(vm_states.ACTIVE, clean_shutdown=False) def test_stop_forced_without_clean_shutdown(self): self._test_stop(vm_states.ACTIVE, force=True, clean_shutdown=False) def _test_stop_invalid_state(self, vm_state): params = dict(vm_state=vm_state) instance = 
self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.stop, self.context, instance) def test_stop_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE, vm_states.ERROR])) for state in invalid_vm_states: self._test_stop_invalid_state(state) def test_stop_a_stopped_inst(self): params = {'vm_state': vm_states.STOPPED} instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.stop, self.context, instance) def test_stop_no_host(self): params = {'host': ''} instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.stop, self.context, instance) @mock.patch('nova.compute.api.API._record_action_start') @mock.patch('nova.compute.rpcapi.ComputeAPI.trigger_crash_dump') def test_trigger_crash_dump(self, trigger_crash_dump, _record_action_start): instance = self._create_instance_obj() self.compute_api.trigger_crash_dump(self.context, instance) _record_action_start.assert_called_once_with(self.context, instance, instance_actions.TRIGGER_CRASH_DUMP) if self.cell_type == 'api': # cell api has not been implemented. pass else: trigger_crash_dump.assert_called_once_with(self.context, instance) self.assertIsNone(instance.task_state) def test_trigger_crash_dump_invalid_state(self): params = dict(vm_state=vm_states.STOPPED) instance = self._create_instance_obj(params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.trigger_crash_dump, self.context, instance) def test_trigger_crash_dump_no_host(self): params = dict(host='') instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.trigger_crash_dump, self.context, instance) def test_trigger_crash_dump_locked(self): params = dict(locked=True) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceIsLocked, self.compute_api.trigger_crash_dump, self.context, instance) def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False, clean_shutdown=True): params = dict(task_state=None, vm_state=vm_state, display_name='fake-name') instance = self._create_instance_obj(params=params) with test.nested( mock.patch.object(self.compute_api, 'is_volume_backed_instance', return_value=boot_from_volume), mock.patch.object(self.compute_api, '_create_image', return_value=dict(id='fake-image-id')), mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'shelve_instance'), mock.patch.object(self.compute_api.compute_rpcapi, 'shelve_offload_instance') ) as ( volume_backed_inst, create_image, instance_save, record_action_start, rpcapi_shelve_instance, rpcapi_shelve_offload_instance ): self.compute_api.shelve(self.context, instance, clean_shutdown=clean_shutdown) # assert field values set on the instance object self.assertEqual(task_states.SHELVING, instance.task_state) # assert our mock calls volume_backed_inst.assert_called_once_with( self.context, instance) instance_save.assert_called_once_with(expected_task_state=[None]) record_action_start.assert_called_once_with( self.context, instance, instance_actions.SHELVE) if boot_from_volume: rpcapi_shelve_offload_instance.assert_called_once_with( self.context, instance=instance, clean_shutdown=clean_shutdown) else: rpcapi_shelve_instance.assert_called_once_with( self.context, instance=instance, image_id='fake-image-id', 
clean_shutdown=clean_shutdown) def test_shelve(self): self._test_shelve() def test_shelve_stopped(self): self._test_shelve(vm_state=vm_states.STOPPED) def test_shelve_paused(self): self._test_shelve(vm_state=vm_states.PAUSED) def test_shelve_suspended(self): self._test_shelve(vm_state=vm_states.SUSPENDED) def test_shelve_boot_from_volume(self): self._test_shelve(boot_from_volume=True) def test_shelve_forced_shutdown(self): self._test_shelve(clean_shutdown=False) def test_shelve_boot_from_volume_forced_shutdown(self): self._test_shelve(boot_from_volume=True, clean_shutdown=False) def _test_shelve_invalid_state(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.shelve, self.context, instance) def test_shelve_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED])) for state in invalid_vm_states: self._test_shelve_invalid_state(state) def _test_shelve_offload(self, clean_shutdown=True): params = dict(task_state=None, vm_state=vm_states.SHELVED) instance = self._create_instance_obj(params=params) with test.nested( mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api.compute_rpcapi, 'shelve_offload_instance') ) as ( instance_save, rpcapi_shelve_offload_instance ): self.compute_api.shelve_offload(self.context, instance, clean_shutdown=clean_shutdown) # assert field values set on the instance object self.assertEqual(task_states.SHELVING_OFFLOADING, instance.task_state) instance_save.assert_called_once_with(expected_task_state=[None]) rpcapi_shelve_offload_instance.assert_called_once_with( self.context, instance=instance, clean_shutdown=clean_shutdown) def test_shelve_offload(self): self._test_shelve_offload() def test_shelve_offload_forced_shutdown(self): self._test_shelve_offload(clean_shutdown=False) def _test_shelve_offload_invalid_state(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.shelve_offload, self.context, instance) def test_shelve_offload_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.SHELVED])) for state in invalid_vm_states: self._test_shelve_offload_invalid_state(state) def _test_reboot_type(self, vm_state, reboot_type, task_state=None): # Ensure instance can be soft rebooted. 
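# _test_shelve and _test_shelve_offload above stack several patches with
# test.nested(), Nova's stand-in for the removed contextlib.nested(): every patch is
# entered before the body runs, all of them are unwound afterwards, and the mocks
# are yielded as a tuple.  A rough standard-library equivalent using
# contextlib.ExitStack (Python 3; contextlib2 provides it on 2.7); the Thing class
# is purely illustrative:
import contextlib
import mock

class Thing(object):
    def ping(self):
        return 'real-ping'
    def pong(self):
        return 'real-pong'

thing = Thing()
with contextlib.ExitStack() as stack:
    ping = stack.enter_context(
        mock.patch.object(thing, 'ping', return_value='fake-ping'))
    pong = stack.enter_context(
        mock.patch.object(thing, 'pong', return_value='fake-pong'))
    assert thing.ping() == 'fake-ping'
    assert thing.pong() == 'fake-pong'
    ping.assert_called_once_with()
assert thing.ping() == 'real-ping'   # both patches are undone on exit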
inst = self._create_instance_obj() inst.vm_state = vm_state inst.task_state = task_state self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(inst, 'save') expected_task_state = [None] if reboot_type == 'HARD': expected_task_state.extend([task_states.REBOOTING, task_states.REBOOT_PENDING, task_states.REBOOT_STARTED, task_states.REBOOTING_HARD, task_states.RESUMING, task_states.UNPAUSING, task_states.SUSPENDING]) inst.save(expected_task_state=expected_task_state) self.compute_api._record_action_start(self.context, inst, instance_actions.REBOOT) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'reboot_instance') rpcapi.reboot_instance(self.context, instance=inst, block_device_info=None, reboot_type=reboot_type) self.mox.ReplayAll() self.compute_api.reboot(self.context, inst, reboot_type) def _test_reboot_type_fails(self, reboot_type, **updates): inst = self._create_instance_obj() inst.update(updates) self.assertRaises(exception.InstanceInvalidState, self.compute_api.reboot, self.context, inst, reboot_type) def test_reboot_hard_active(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD') def test_reboot_hard_error(self): self._test_reboot_type(vm_states.ERROR, 'HARD') def test_reboot_hard_rebooting(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.REBOOTING) def test_reboot_hard_reboot_started(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.REBOOT_STARTED) def test_reboot_hard_reboot_pending(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.REBOOT_PENDING) def test_reboot_hard_rescued(self): self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED) def test_reboot_hard_resuming(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.RESUMING) def test_reboot_hard_pausing(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.PAUSING) def test_reboot_hard_unpausing(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.UNPAUSING) def test_reboot_hard_suspending(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.SUSPENDING) def test_reboot_hard_error_not_launched(self): self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR, launched_at=None) def test_reboot_soft(self): self._test_reboot_type(vm_states.ACTIVE, 'SOFT') def test_reboot_soft_error(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR) def test_reboot_soft_paused(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED) def test_reboot_soft_stopped(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED) def test_reboot_soft_suspended(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED) def test_reboot_soft_rebooting(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING) def test_reboot_soft_rebooting_hard(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING_HARD) def test_reboot_soft_reboot_started(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOT_STARTED) def test_reboot_soft_reboot_pending(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOT_PENDING) def test_reboot_soft_rescued(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED) def 
test_reboot_soft_error_not_launched(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR, launched_at=None) def test_reboot_soft_resuming(self): self._test_reboot_type_fails('SOFT', task_state=task_states.RESUMING) def test_reboot_soft_pausing(self): self._test_reboot_type_fails('SOFT', task_state=task_states.PAUSING) def test_reboot_soft_unpausing(self): self._test_reboot_type_fails('SOFT', task_state=task_states.UNPAUSING) def test_reboot_soft_suspending(self): self._test_reboot_type_fails('SOFT', task_state=task_states.SUSPENDING) def _test_delete_resizing_part(self, inst, deltas): old_flavor = inst.old_flavor deltas['cores'] = -old_flavor.vcpus deltas['ram'] = -old_flavor.memory_mb def _test_delete_resized_part(self, inst): migration = objects.Migration._from_db_object( self.context, objects.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(objects.Migration, 'get_by_instance_and_status') self.context.elevated().AndReturn(self.context) objects.Migration.get_by_instance_and_status( self.context, inst.uuid, 'finished').AndReturn(migration) compute_utils.downsize_quota_delta(self.context, inst).AndReturn('deltas') fake_quotas = objects.Quotas.from_reservations(self.context, ['rsvs']) compute_utils.reserve_quota_delta(self.context, 'deltas', inst).AndReturn(fake_quotas) self.compute_api._record_action_start( self.context, inst, instance_actions.CONFIRM_RESIZE) self.compute_api.compute_rpcapi.confirm_resize( self.context, inst, migration, migration['source_compute'], fake_quotas.reservations, cast=False) def _test_delete_shelved_part(self, inst): image_api = self.compute_api.image_api self.mox.StubOutWithMock(image_api, 'delete') snapshot_id = inst.system_metadata.get('shelved_image_id') if snapshot_id == SHELVED_IMAGE: image_api.delete(self.context, snapshot_id).AndReturn(True) elif snapshot_id == SHELVED_IMAGE_NOT_FOUND: image_api.delete(self.context, snapshot_id).AndRaise( exception.ImageNotFound(image_id=snapshot_id)) elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED: image_api.delete(self.context, snapshot_id).AndRaise( exception.ImageNotAuthorized(image_id=snapshot_id)) elif snapshot_id == SHELVED_IMAGE_EXCEPTION: image_api.delete(self.context, snapshot_id).AndRaise( test.TestingException("Unexpected error")) def _test_downed_host_part(self, inst, updates, delete_time, delete_type): compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, '%s.start' % delete_type) self.context.elevated().AndReturn(self.context) self.compute_api.network_api.deallocate_for_instance( self.context, inst) state = ('soft' in delete_type and vm_states.SOFT_DELETED or vm_states.DELETED) updates.update({'vm_state': state, 'task_state': None, 'terminated_at': delete_time}) inst.save() updates.update({'deleted_at': delete_time, 'deleted': True}) fake_inst = fake_instance.fake_db_instance(**updates) self.compute_api._local_cleanup_bdm_volumes([], inst, self.context) db.instance_destroy(self.context, inst.uuid, constraint=None).AndReturn(fake_inst) compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, '%s.end' % delete_type, system_metadata=inst.system_metadata) def _test_delete(self, delete_type, **attrs): reservations = ['fake-resv'] inst = self._create_instance_obj() inst.update(attrs) inst._context = self.context deltas = {'instances': -1, 'cores': -inst.vcpus, 'ram': -inst.memory_mb} delete_time = datetime.datetime(1955, 11, 5, 9, 30, tzinfo=iso8601.iso8601.Utc()) 
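# The next statement pins "now" so that the deleted_at/terminated_at values the test
# asserts on are deterministic: oslo.utils' TimeFixture overrides timeutils.utcnow()
# for the lifetime of the fixture and restores the real clock on cleanup.  A minimal
# sketch of the same fixture used on its own, outside a test case (the timestamp is
# arbitrary):
import datetime

import iso8601
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils

frozen = datetime.datetime(1955, 11, 5, 9, 30, tzinfo=iso8601.iso8601.Utc())
with utils_fixture.TimeFixture(frozen):
    assert timeutils.utcnow() == frozen   # every call sees the pinned time
assert timeutils.utcnow() != frozen       # real clock again after cleanup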
self.useFixture(utils_fixture.TimeFixture(delete_time)) task_state = (delete_type == 'soft_delete' and task_states.SOFT_DELETING or task_states.DELETING) updates = {'progress': 0, 'task_state': task_state} if delete_type == 'soft_delete': updates['deleted_at'] = delete_time self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(self.compute_api.servicegroup_api, 'service_is_up') self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta') self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.StubOutWithMock(self.compute_api.network_api, 'deallocate_for_instance') self.mox.StubOutWithMock(db, 'instance_system_metadata_get') self.mox.StubOutWithMock(db, 'instance_destroy') self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'confirm_resize') if (inst.vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)): self._test_delete_shelved_part(inst) if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi self.mox.StubOutWithMock(rpcapi, 'terminate_instance') self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance') objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, inst.uuid).AndReturn([]) inst.save() if inst.task_state == task_states.RESIZE_FINISH: self._test_delete_resizing_part(inst, deltas) quota.QUOTAS.reserve(self.context, project_id=inst.project_id, user_id=inst.user_id, expire=mox.IgnoreArg(), **deltas).AndReturn(reservations) # NOTE(comstud): This is getting messy. But what we are wanting # to test is: # If cells is enabled and we're the API cell: # * Cast to cells_rpcapi. with reservations=None # * Commit reservations # Otherwise: # * Check for downed host # * If downed host: # * Clean up instance, destroying it, sending notifications. # (Tested in _test_downed_host_part()) # * Commit reservations # * If not downed host: # * Record the action start. # * Cast to compute_rpcapi. 
with the reservations cast = True commit_quotas = True soft_delete = False if self.cell_type != 'api': if inst.vm_state == vm_states.RESIZED: self._test_delete_resized_part(inst) if inst.vm_state == vm_states.SOFT_DELETED: soft_delete = True if inst.vm_state != vm_states.SHELVED_OFFLOADED: self.context.elevated().AndReturn(self.context) db.service_get_by_compute_host( self.context, inst.host).AndReturn( test_service.fake_service) self.compute_api.servicegroup_api.service_is_up( mox.IsA(objects.Service)).AndReturn( inst.host != 'down-host') if (inst.host == 'down-host' or inst.vm_state == vm_states.SHELVED_OFFLOADED): self._test_downed_host_part(inst, updates, delete_time, delete_type) cast = False else: # Happens on the manager side commit_quotas = False if cast: if self.cell_type != 'api': self.compute_api._record_action_start(self.context, inst, instance_actions.DELETE) if commit_quotas or soft_delete: cast_reservations = None else: cast_reservations = reservations if delete_type == 'soft_delete': rpcapi.soft_delete_instance(self.context, inst, reservations=cast_reservations) elif delete_type in ['delete', 'force_delete']: rpcapi.terminate_instance(self.context, inst, [], reservations=cast_reservations, delete_type=delete_type) if commit_quotas: # Local delete or when we're testing API cell. quota.QUOTAS.commit(self.context, reservations, project_id=inst.project_id, user_id=inst.user_id) self.mox.ReplayAll() getattr(self.compute_api, delete_type)(self.context, inst) for k, v in updates.items(): self.assertEqual(inst[k], v) self.mox.UnsetStubs() def test_delete(self): self._test_delete('delete') def test_delete_if_not_launched(self): self._test_delete('delete', launched_at=None) def test_delete_in_resizing(self): old_flavor = objects.Flavor(vcpus=1, memory_mb=512, extra_specs={}) self._test_delete('delete', task_state=task_states.RESIZE_FINISH, old_flavor=old_flavor) def test_delete_in_resized(self): self._test_delete('delete', vm_state=vm_states.RESIZED) def test_delete_shelved(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE} self._test_delete('delete', vm_state=vm_states.SHELVED, system_metadata=fake_sys_meta) def test_delete_shelved_offloaded(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE} self._test_delete('delete', vm_state=vm_states.SHELVED_OFFLOADED, system_metadata=fake_sys_meta) def test_delete_shelved_image_not_found(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND} self._test_delete('delete', vm_state=vm_states.SHELVED_OFFLOADED, system_metadata=fake_sys_meta) def test_delete_shelved_image_not_authorized(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED} self._test_delete('delete', vm_state=vm_states.SHELVED_OFFLOADED, system_metadata=fake_sys_meta) def test_delete_shelved_exception(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION} self._test_delete('delete', vm_state=vm_states.SHELVED, system_metadata=fake_sys_meta) def test_delete_with_down_host(self): self._test_delete('delete', host='down-host') def test_delete_soft_with_down_host(self): self._test_delete('soft_delete', host='down-host') def test_delete_soft(self): self._test_delete('soft_delete') def test_delete_forced(self): fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE} for vm_state in self._get_vm_states(): if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED): self._test_delete('force_delete', vm_state=vm_state, system_metadata=fake_sys_meta) self._test_delete('force_delete', vm_state=vm_state) def 
test_delete_fast_if_host_not_set(self): inst = self._create_instance_obj() inst.host = '' quotas = quotas_obj.Quotas(self.context) updates = {'progress': 0, 'task_state': task_states.DELETING} self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(db, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(db, 'constraint') self.mox.StubOutWithMock(db, 'instance_destroy') self.mox.StubOutWithMock(self.compute_api, '_create_reservations') self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage') if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'terminate_instance') db.block_device_mapping_get_all_by_instance(self.context, inst.uuid).AndReturn([]) inst.save() self.compute_api._create_reservations(self.context, inst, inst.task_state, inst.project_id, inst.user_id ).AndReturn(quotas) if self.cell_type == 'api': rpcapi.terminate_instance( self.context, inst, mox.IsA(objects.BlockDeviceMappingList), reservations=None, delete_type='delete') else: compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, 'delete.start') db.constraint(host=mox.IgnoreArg()).AndReturn('constraint') delete_time = datetime.datetime(1955, 11, 5, 9, 30, tzinfo=iso8601.iso8601.Utc()) updates['deleted_at'] = delete_time updates['deleted'] = True fake_inst = fake_instance.fake_db_instance(**updates) db.instance_destroy(self.context, inst.uuid, constraint='constraint').AndReturn(fake_inst) compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, 'delete.end', system_metadata=inst.system_metadata) self.mox.ReplayAll() self.compute_api.delete(self.context, inst) for k, v in updates.items(): self.assertEqual(inst[k], v) def _fake_do_delete(context, instance, bdms, rservations=None, local=False): pass def test_local_delete_with_deleted_volume(self): bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( {'id': 42, 'volume_id': 'volume_id', 'source_type': 'volume', 'destination_type': 'volume', 'delete_on_termination': False}))] inst = self._create_instance_obj() inst._context = self.context self.mox.StubOutWithMock(inst, 'destroy') self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(self.compute_api.network_api, 'deallocate_for_instance') self.mox.StubOutWithMock(db, 'instance_system_metadata_get') self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage') self.mox.StubOutWithMock(self.compute_api.volume_api, 'detach') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy') compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, 'delete.start') self.context.elevated().MultipleTimes().AndReturn(self.context) if self.cell_type != 'api': self.compute_api.network_api.deallocate_for_instance( self.context, inst) self.compute_api.volume_api.detach( mox.IgnoreArg(), 'volume_id', inst.uuid).\ AndRaise(exception.VolumeNotFound('volume_id')) bdms[0].destroy() inst.destroy() compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, 'delete.end', system_metadata=inst.system_metadata) self.mox.ReplayAll() self.compute_api._local_delete(self.context, inst, bdms, 'delete', self._fake_do_delete) @mock.patch.object(objects.BlockDeviceMapping, 'destroy') def test_local_cleanup_bdm_volumes_stashed_connector(self, mock_destroy): """Tests that we call volume_api.terminate_connection when we found a stashed connector in the 
bdm.connection_info dict. """ inst = self._create_instance_obj() # create two fake bdms, one is a volume and one isn't, both will be # destroyed but we only cleanup the volume bdm in cinder conn_info = {'connector': {'host': inst.host}} vol_bdm = objects.BlockDeviceMapping(self.context, id=1, instance_uuid=inst.uuid, volume_id=uuids.volume_id, source_type='volume', destination_type='volume', delete_on_termination=True, connection_info=jsonutils.dumps( conn_info )) loc_bdm = objects.BlockDeviceMapping(self.context, id=2, instance_uuid=inst.uuid, volume_id=uuids.volume_id2, source_type='blank', destination_type='local') bdms = objects.BlockDeviceMappingList(objects=[vol_bdm, loc_bdm]) @mock.patch.object(self.compute_api.volume_api, 'terminate_connection') @mock.patch.object(self.compute_api.volume_api, 'detach') @mock.patch.object(self.compute_api.volume_api, 'delete') @mock.patch.object(self.context, 'elevated', return_value=self.context) def do_test(self, mock_elevated, mock_delete, mock_detach, mock_terminate): self.compute_api._local_cleanup_bdm_volumes( bdms, inst, self.context) mock_terminate.assert_called_once_with( self.context, uuids.volume_id, conn_info['connector']) mock_detach.assert_called_once_with( self.context, uuids.volume_id, inst.uuid) mock_delete.assert_called_once_with(self.context, uuids.volume_id) self.assertEqual(2, mock_destroy.call_count) do_test(self) def test_get_stashed_volume_connector_none(self): inst = self._create_instance_obj() # connection_info isn't set bdm = objects.BlockDeviceMapping(self.context) self.assertIsNone( self.compute_api._get_stashed_volume_connector(bdm, inst)) # connection_info is None bdm.connection_info = None self.assertIsNone( self.compute_api._get_stashed_volume_connector(bdm, inst)) # connector is not set in connection_info bdm.connection_info = jsonutils.dumps({}) self.assertIsNone( self.compute_api._get_stashed_volume_connector(bdm, inst)) # connector is set but different host conn_info = {'connector': {'host': 'other_host'}} bdm.connection_info = jsonutils.dumps(conn_info) self.assertIsNone( self.compute_api._get_stashed_volume_connector(bdm, inst)) def test_local_delete_without_info_cache(self): inst = self._create_instance_obj() with test.nested( mock.patch.object(inst, 'destroy'), mock.patch.object(self.context, 'elevated'), mock.patch.object(self.compute_api.network_api, 'deallocate_for_instance'), mock.patch.object(db, 'instance_system_metadata_get'), mock.patch.object(compute_utils, 'notify_about_instance_usage') ) as ( inst_destroy, context_elevated, net_api_deallocate_for_instance, db_instance_system_metadata_get, notify_about_instance_usage ): compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, 'delete.start') self.context.elevated().MultipleTimes().AndReturn(self.context) if self.cell_type != 'api': self.compute_api.network_api.deallocate_for_instance( self.context, inst) inst.destroy() compute_utils.notify_about_instance_usage( self.compute_api.notifier, self.context, inst, 'delete.end', system_metadata=inst.system_metadata) inst.info_cache = None self.compute_api._local_delete(self.context, inst, [], 'delete', self._fake_do_delete) def test_delete_disabled(self): inst = self._create_instance_obj() inst.disable_terminate = True self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.ReplayAll() self.compute_api.delete(self.context, inst) def test_delete_soft_rollback(self): inst = self._create_instance_obj() self.mox.StubOutWithMock(db, 
'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(inst, 'save') delete_time = datetime.datetime(1955, 11, 5) self.useFixture(utils_fixture.TimeFixture(delete_time)) db.block_device_mapping_get_all_by_instance( self.context, inst.uuid).AndReturn([]) inst.save().AndRaise(test.TestingException) self.mox.ReplayAll() self.assertRaises(test.TestingException, self.compute_api.soft_delete, self.context, inst) def _test_confirm_resize(self, mig_ref_passed=False): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) fake_mig = objects.Migration._from_db_object( self.context, objects.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(objects.Migration, 'get_by_instance_and_status') self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta') self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(fake_mig, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'confirm_resize') self.context.elevated().AndReturn(self.context) if not mig_ref_passed: objects.Migration.get_by_instance_and_status( self.context, fake_inst['uuid'], 'finished').AndReturn( fake_mig) compute_utils.downsize_quota_delta(self.context, fake_inst).AndReturn('deltas') resvs = ['resvs'] fake_quotas = objects.Quotas.from_reservations(self.context, resvs) compute_utils.reserve_quota_delta(self.context, 'deltas', fake_inst).AndReturn(fake_quotas) def _check_mig(expected_task_state=None): self.assertEqual('confirming', fake_mig.status) fake_mig.save().WithSideEffects(_check_mig) if self.cell_type: fake_quotas.commit() self.compute_api._record_action_start(self.context, fake_inst, 'confirmResize') self.compute_api.compute_rpcapi.confirm_resize( self.context, fake_inst, fake_mig, 'compute-source', [] if self.cell_type else fake_quotas.reservations) self.mox.ReplayAll() if mig_ref_passed: self.compute_api.confirm_resize(self.context, fake_inst, migration=fake_mig) else: self.compute_api.confirm_resize(self.context, fake_inst) def test_confirm_resize(self): self._test_confirm_resize() def test_confirm_resize_with_migration_ref(self): self._test_confirm_resize(mig_ref_passed=True) def _test_revert_resize(self): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) fake_mig = objects.Migration._from_db_object( self.context, objects.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(objects.Migration, 'get_by_instance_and_status') self.mox.StubOutWithMock(compute_utils, 'reverse_upsize_quota_delta') self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(fake_inst, 'save') self.mox.StubOutWithMock(fake_mig, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'revert_resize') self.context.elevated().AndReturn(self.context) objects.Migration.get_by_instance_and_status( self.context, fake_inst['uuid'], 'finished').AndReturn( fake_mig) compute_utils.reverse_upsize_quota_delta( self.context, fake_inst).AndReturn('deltas') resvs = ['resvs'] fake_quotas = objects.Quotas.from_reservations(self.context, resvs) compute_utils.reserve_quota_delta(self.context, 'deltas', fake_inst).AndReturn(fake_quotas) def _check_state(expected_task_state=None): 
self.assertEqual(task_states.RESIZE_REVERTING, fake_inst.task_state) fake_inst.save(expected_task_state=[None]).WithSideEffects( _check_state) def _check_mig(expected_task_state=None): self.assertEqual('reverting', fake_mig.status) fake_mig.save().WithSideEffects(_check_mig) if self.cell_type: fake_quotas.commit() self.compute_api._record_action_start(self.context, fake_inst, 'revertResize') self.compute_api.compute_rpcapi.revert_resize( self.context, fake_inst, fake_mig, 'compute-dest', [] if self.cell_type else fake_quotas.reservations) self.mox.ReplayAll() self.compute_api.revert_resize(self.context, fake_inst) def test_revert_resize(self): self._test_revert_resize() def test_revert_resize_concurrent_fail(self): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) fake_mig = objects.Migration._from_db_object( self.context, objects.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(objects.Migration, 'get_by_instance_and_status') self.mox.StubOutWithMock(compute_utils, 'reverse_upsize_quota_delta') self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(fake_inst, 'save') self.context.elevated().AndReturn(self.context) objects.Migration.get_by_instance_and_status( self.context, fake_inst['uuid'], 'finished').AndReturn(fake_mig) delta = ['delta'] compute_utils.reverse_upsize_quota_delta( self.context, fake_inst).AndReturn(delta) resvs = ['resvs'] fake_quotas = objects.Quotas.from_reservations(self.context, resvs) compute_utils.reserve_quota_delta( self.context, delta, fake_inst).AndReturn(fake_quotas) exc = exception.UnexpectedTaskStateError( instance_uuid=fake_inst['uuid'], actual={'task_state': task_states.RESIZE_REVERTING}, expected={'task_state': [None]}) fake_inst.save(expected_task_state=[None]).AndRaise(exc) fake_quotas.rollback() self.mox.ReplayAll() self.assertRaises(exception.UnexpectedTaskStateError, self.compute_api.revert_resize, self.context, fake_inst) def _test_resize(self, flavor_id_passed=True, same_host=False, allow_same_host=False, project_id=None, extra_kwargs=None, same_flavor=False, clean_shutdown=True): if extra_kwargs is None: extra_kwargs = {} self.flags(allow_resize_to_same_host=allow_same_host) params = {} if project_id is not None: # To test instance w/ different project id than context (admin) params['project_id'] = project_id fake_inst = self._create_instance_obj(params=params) self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta') self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(fake_inst, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') current_flavor = fake_inst.get_flavor() if flavor_id_passed: new_flavor = self._create_flavor(id=200, flavorid='new-flavor-id', name='new_flavor', disabled=False) if same_flavor: new_flavor.id = current_flavor.id flavors.get_flavor_by_flavor_id( 'new-flavor-id', read_deleted='no').AndReturn(new_flavor) else: new_flavor = current_flavor if (self.cell_type == 'compute' or not (flavor_id_passed and same_flavor)): resvs = ['resvs'] project_id, user_id = quotas_obj.ids_from_instance(self.context, fake_inst) fake_quotas = objects.Quotas.from_reservations(self.context, resvs) if flavor_id_passed: compute_utils.upsize_quota_delta( self.context, mox.IsA(objects.Flavor), 
mox.IsA(objects.Flavor)).AndReturn('deltas') compute_utils.reserve_quota_delta( self.context, 'deltas', fake_inst).AndReturn(fake_quotas) def _check_state(expected_task_state=None): self.assertEqual(task_states.RESIZE_PREP, fake_inst.task_state) self.assertEqual(fake_inst.progress, 0) for key, value in extra_kwargs.items(): self.assertEqual(value, getattr(fake_inst, key)) fake_inst.save(expected_task_state=[None]).WithSideEffects( _check_state) if allow_same_host: filter_properties = {'ignore_hosts': []} else: filter_properties = {'ignore_hosts': [fake_inst['host']]} if flavor_id_passed: expected_reservations = fake_quotas.reservations else: expected_reservations = [] if self.cell_type == 'api': fake_quotas.commit() expected_reservations = [] mig = objects.Migration() def _get_migration(context=None): return mig def _check_mig(): self.assertEqual(fake_inst.uuid, mig.instance_uuid) self.assertEqual(current_flavor.id, mig.old_instance_type_id) self.assertEqual(new_flavor.id, mig.new_instance_type_id) self.assertEqual('finished', mig.status) if new_flavor.id != current_flavor.id: self.assertEqual('resize', mig.migration_type) else: self.assertEqual('migration', mig.migration_type) self.stubs.Set(objects, 'Migration', _get_migration) self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(mig, 'create') self.context.elevated().AndReturn(self.context) mig.create().WithSideEffects(_check_mig) if flavor_id_passed: self.compute_api._record_action_start(self.context, fake_inst, 'resize') else: self.compute_api._record_action_start(self.context, fake_inst, 'migrate') scheduler_hint = {'filter_properties': filter_properties} self.compute_api.compute_task_api.resize_instance( self.context, fake_inst, extra_kwargs, scheduler_hint=scheduler_hint, flavor=mox.IsA(objects.Flavor), reservations=expected_reservations, clean_shutdown=clean_shutdown) self.mox.ReplayAll() if flavor_id_passed: self.compute_api.resize(self.context, fake_inst, flavor_id='new-flavor-id', clean_shutdown=clean_shutdown, **extra_kwargs) else: self.compute_api.resize(self.context, fake_inst, clean_shutdown=clean_shutdown, **extra_kwargs) def _test_migrate(self, *args, **kwargs): self._test_resize(*args, flavor_id_passed=False, **kwargs) def test_resize(self): self._test_resize() def test_resize_with_kwargs(self): self._test_resize(extra_kwargs=dict(cow='moo')) def test_resize_same_host_and_allowed(self): self._test_resize(same_host=True, allow_same_host=True) def test_resize_same_host_and_not_allowed(self): self._test_resize(same_host=True, allow_same_host=False) def test_resize_different_project_id(self): self._test_resize(project_id='different') def test_resize_forced_shutdown(self): self._test_resize(clean_shutdown=False) def test_migrate(self): self._test_migrate() def test_migrate_with_kwargs(self): self._test_migrate(extra_kwargs=dict(cow='moo')) def test_migrate_same_host_and_allowed(self): self._test_migrate(same_host=True, allow_same_host=True) def test_migrate_same_host_and_not_allowed(self): self._test_migrate(same_host=True, allow_same_host=False) def test_migrate_different_project_id(self): self._test_migrate(project_id='different') def test_resize_invalid_flavor_fails(self): self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') # Should never reach these. 
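# "Should never reach these" works because a StubOutWithMock() with no expectation
# recorded makes any call to that attribute during replay raise
# UnexpectedMethodCallError, failing the test on the spot; stubbing
# reserve_quota_delta, commit, _record_action_start and resize_instance without
# recording anything therefore asserts that the FlavorNotFound short-circuits before
# any of them is reached.  A tiny standalone illustration; Billing is a hypothetical
# class, not Nova code:
from mox3 import mox as mox_lib

class Billing(object):
    def charge(self, amount):
        pass
    def refund(self, amount):
        pass

m = mox_lib.Mox()
bill = Billing()
m.StubOutWithMock(bill, 'charge')
m.StubOutWithMock(bill, 'refund')      # deliberately no expectation recorded
bill.charge(5).AndReturn('receipt')    # only charge(5) is allowed
m.ReplayAll()
assert bill.charge(5) == 'receipt'
try:
    bill.refund(5)                     # nothing recorded, so this fails immediately
except mox_lib.UnexpectedMethodCallError:
    pass
m.UnsetStubs()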
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') fake_inst = self._create_instance_obj() exc = exception.FlavorNotFound(flavor_id='flavor-id') flavors.get_flavor_by_flavor_id('flavor-id', read_deleted='no').AndRaise(exc) self.mox.ReplayAll() with mock.patch.object(fake_inst, 'save') as mock_save: self.assertRaises(exception.FlavorNotFound, self.compute_api.resize, self.context, fake_inst, flavor_id='flavor-id') self.assertFalse(mock_save.called) def test_resize_disabled_flavor_fails(self): self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') # Should never reach these. self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') fake_inst = self._create_instance_obj() fake_flavor = self._create_flavor(id=200, flavorid='flavor-id', name='foo', disabled=True) flavors.get_flavor_by_flavor_id( 'flavor-id', read_deleted='no').AndReturn(fake_flavor) self.mox.ReplayAll() with mock.patch.object(fake_inst, 'save') as mock_save: self.assertRaises(exception.FlavorNotFound, self.compute_api.resize, self.context, fake_inst, flavor_id='flavor-id') self.assertFalse(mock_save.called) @mock.patch.object(flavors, 'get_flavor_by_flavor_id') def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id): fake_inst = self._create_instance_obj() fake_flavor = self._create_flavor(id=200, flavorid='flavor-id', name='foo', root_gb=0) get_flavor_by_flavor_id.return_value = fake_flavor with mock.patch.object(self.compute_api, 'is_volume_backed_instance', return_value=False): self.assertRaises(exception.CannotResizeDisk, self.compute_api.resize, self.context, fake_inst, flavor_id='flavor-id') @mock.patch('nova.compute.api.API._record_action_start') @mock.patch('nova.compute.api.API._resize_cells_support') @mock.patch('nova.conductor.conductor_api.ComputeTaskAPI.resize_instance') @mock.patch.object(flavors, 'get_flavor_by_flavor_id') def test_resize_to_zero_disk_flavor_volume_backed(self, get_flavor_by_flavor_id, resize_instance_mock, cells_support_mock, record_mock): params = dict(image_ref='') fake_inst = self._create_instance_obj(params=params) fake_flavor = self._create_flavor(id=200, flavorid='flavor-id', name='foo', root_gb=0) get_flavor_by_flavor_id.return_value = fake_flavor @mock.patch.object(self.compute_api, 'is_volume_backed_instance', return_value=True) @mock.patch.object(fake_inst, 'save') def do_test(mock_save, mock_volume): self.compute_api.resize(self.context, fake_inst, flavor_id='flavor-id') mock_volume.assert_called_once_with(self.context, fake_inst) do_test() def test_resize_quota_exceeds_fails(self): self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta') self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta') # Should never reach these. 
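# Besides with-blocks and mox stubs, several tests above
# (test_resize_to_zero_disk_flavor_volume_backed,
# test_local_cleanup_bdm_volumes_stashed_connector) patch by stacking
# @mock.patch.object decorators on an inner do_test() function and then calling it;
# the mocks are injected as positional arguments from the innermost decorator
# outwards.  A minimal standalone sketch; Engine is a hypothetical class used only
# for illustration:
import mock

class Engine(object):
    def start(self):
        return 'vroom'
    def stop(self):
        return 'silence'

engine = Engine()

@mock.patch.object(Engine, 'stop', return_value='fake-stop')
@mock.patch.object(Engine, 'start', return_value='fake-start')
def do_test(mock_start, mock_stop):
    # innermost decorator (start) arrives as the first argument
    assert engine.start() == 'fake-start'
    assert engine.stop() == 'fake-stop'
    mock_start.assert_called_once_with()

do_test()
assert engine.start() == 'vroom'   # the class methods are restored afterwards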
self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') fake_inst = self._create_instance_obj() fake_flavor = self._create_flavor(id=200, flavorid='flavor-id', name='foo', disabled=False) flavors.get_flavor_by_flavor_id( 'flavor-id', read_deleted='no').AndReturn(fake_flavor) deltas = dict(resource=0) compute_utils.upsize_quota_delta( self.context, mox.IsA(objects.Flavor), mox.IsA(objects.Flavor)).AndReturn(deltas) usage = dict(in_use=0, reserved=0) quotas = {'resource': 0} usages = {'resource': usage} overs = ['resource'] over_quota_args = dict(quotas=quotas, usages=usages, overs=overs) compute_utils.reserve_quota_delta(self.context, deltas, fake_inst).AndRaise( exception.OverQuota(**over_quota_args)) self.mox.ReplayAll() with mock.patch.object(fake_inst, 'save') as mock_save: self.assertRaises(exception.TooManyInstances, self.compute_api.resize, self.context, fake_inst, flavor_id='flavor-id') self.assertFalse(mock_save.called) def test_check_instance_quota_exceeds_with_multiple_resources(self): quotas = {'cores': 1, 'instances': 1, 'ram': 512} usages = {'cores': dict(in_use=1, reserved=0), 'instances': dict(in_use=1, reserved=0), 'ram': dict(in_use=512, reserved=0)} overs = ['cores', 'instances', 'ram'] over_quota_args = dict(quotas=quotas, usages=usages, overs=overs) e = exception.OverQuota(**over_quota_args) fake_flavor = self._create_flavor() instance_num = 1 with mock.patch.object(objects.Quotas, 'reserve', side_effect=e): try: self.compute_api._check_num_instances_quota(self.context, fake_flavor, instance_num, instance_num) except exception.TooManyInstances as e: self.assertEqual('cores, instances, ram', e.kwargs['overs']) self.assertEqual('1, 1, 512', e.kwargs['req']) self.assertEqual('1, 1, 512', e.kwargs['used']) self.assertEqual('1, 1, 512', e.kwargs['allowed']) else: self.fail("Exception not raised") @mock.patch.object(flavors, 'get_flavor_by_flavor_id') @mock.patch.object(objects.Quotas, 'reserve') def test_resize_instance_quota_exceeds_with_multiple_resources( self, mock_reserve, mock_get_flavor): quotas = {'cores': 1, 'ram': 512} usages = {'cores': dict(in_use=1, reserved=0), 'ram': dict(in_use=512, reserved=0)} overs = ['cores', 'ram'] over_quota_args = dict(quotas=quotas, usages=usages, overs=overs) mock_reserve.side_effect = exception.OverQuota(**over_quota_args) mock_get_flavor.return_value = self._create_flavor(id=333, vcpus=3, memory_mb=1536) try: self.compute_api.resize(self.context, self._create_instance_obj(), 'fake_flavor_id') except exception.TooManyInstances as e: self.assertEqual('cores, ram', e.kwargs['overs']) self.assertEqual('2, 1024', e.kwargs['req']) self.assertEqual('1, 512', e.kwargs['used']) self.assertEqual('1, 512', e.kwargs['allowed']) mock_get_flavor.assert_called_once_with('fake_flavor_id', read_deleted="no") else: self.fail("Exception not raised") def test_pause(self): # Ensure instance can be paused. 
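# The two quota tests just above check how an OverQuota raised by Quotas.reserve()
# is surfaced as TooManyInstances: 'overs' lists the exhausted resources, 'req' the
# requested deltas, 'used' the current usage and 'allowed' the hard limits.  For a
# resize the requested delta is simply new_flavor minus old_flavor, which is why a
# 1 vCPU / 512 MB to 3 vCPU / 1536 MB resize reports req='2, 1024'.  This is only
# the arithmetic those assertions encode, not the real implementation in
# nova/compute/api.py:
quotas = {'cores': 1, 'ram': 512}                      # hard limits
usages = {'cores': {'in_use': 1, 'reserved': 0},
          'ram': {'in_use': 512, 'reserved': 0}}
old_flavor = {'vcpus': 1, 'memory_mb': 512}
new_flavor = {'vcpus': 3, 'memory_mb': 1536}

deltas = {'cores': new_flavor['vcpus'] - old_flavor['vcpus'],
          'ram': new_flavor['memory_mb'] - old_flavor['memory_mb']}
overs = sorted(r for r in deltas
               if usages[r]['in_use'] + usages[r]['reserved'] + deltas[r] > quotas[r])
report = {'overs': ', '.join(overs),
          'req': ', '.join(str(deltas[r]) for r in overs),
          'used': ', '.join(str(usages[r]['in_use']) for r in overs),
          'allowed': ', '.join(str(quotas[r]) for r in overs)}
assert report == {'overs': 'cores, ram', 'req': '2, 1024',
                  'used': '1, 512', 'allowed': '1, 512'}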
instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertIsNone(instance.task_state) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'pause_instance') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.PAUSE) rpcapi.pause_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.pause(self.context, instance) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertEqual(task_states.PAUSING, instance.task_state) def _test_pause_fails(self, vm_state): params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertIsNone(instance.task_state) self.assertRaises(exception.InstanceInvalidState, self.compute_api.pause, self.context, instance) def test_pause_fails_invalid_states(self): invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE])) for state in invalid_vm_states: self._test_pause_fails(state) def test_unpause(self): # Ensure instance can be unpaused. params = dict(vm_state=vm_states.PAUSED) instance = self._create_instance_obj(params=params) self.assertEqual(instance.vm_state, vm_states.PAUSED) self.assertIsNone(instance.task_state) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.cell_type == 'api': rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'unpause_instance') instance.save(expected_task_state=[None]) self.compute_api._record_action_start(self.context, instance, instance_actions.UNPAUSE) rpcapi.unpause_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.unpause(self.context, instance) self.assertEqual(vm_states.PAUSED, instance.vm_state) self.assertEqual(task_states.UNPAUSING, instance.task_state) def test_live_migrate_active_vm_state(self): instance = self._create_instance_obj() self._live_migrate_instance(instance) def test_live_migrate_paused_vm_state(self): paused_state = dict(vm_state=vm_states.PAUSED) instance = self._create_instance_obj(params=paused_state) self._live_migrate_instance(instance) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(objects.InstanceAction, 'action_start') @mock.patch.object(objects.Instance, 'save') def test_live_migrate_messaging_timeout(self, _save, _action, get_spec, add_instance_fault_from_exc): instance = self._create_instance_obj() if self.cell_type == 'api': api = self.compute_api.cells_rpcapi else: api = conductor.api.ComputeTaskAPI with mock.patch.object(api, 'live_migrate_instance', side_effect=oslo_exceptions.MessagingTimeout): self.assertRaises(oslo_exceptions.MessagingTimeout, self.compute_api.live_migrate, self.context, instance, host_name='fake_dest_host', block_migration=True, disk_over_commit=True) add_instance_fault_from_exc.assert_called_once_with( self.context, instance, mock.ANY) @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.InstanceAction, 'action_start') def _live_migrate_instance(self, instance, _save, _action, get_spec): # TODO(gilliard): This logic is upside-down (different # behaviour depending on 
which class this method is mixed-into. Once # we have cellsv2 we can remove this kind of logic from this test if self.cell_type == 'api': api = self.compute_api.cells_rpcapi else: api = conductor.api.ComputeTaskAPI fake_spec = objects.RequestSpec() get_spec.return_value = fake_spec with mock.patch.object(api, 'live_migrate_instance') as task: self.compute_api.live_migrate(self.context, instance, block_migration=True, disk_over_commit=True, host_name='fake_dest_host') self.assertEqual(task_states.MIGRATING, instance.task_state) task.assert_called_once_with(self.context, instance, 'fake_dest_host', block_migration=True, disk_over_commit=True, request_spec=fake_spec) def test_swap_volume_volume_api_usage(self): # This test ensures that volume_id arguments are passed to volume_api # and that volumes return to previous states in case of error. def fake_vol_api_begin_detaching(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) volumes[volume_id]['status'] = 'detaching' def fake_vol_api_roll_detaching(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'detaching': volumes[volume_id]['status'] = 'in-use' def fake_vol_api_reserve(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) self.assertEqual(volumes[volume_id]['status'], 'available') volumes[volume_id]['status'] = 'attaching' def fake_vol_api_unreserve(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'attaching': volumes[volume_id]['status'] = 'available' def fake_swap_volume_exc(context, instance, old_volume_id, new_volume_id): raise AttributeError # Random exception # Should fail if VM state is not valid instance = fake_instance.fake_instance_obj(None, **{ 'vm_state': vm_states.BUILDING, 'launched_at': timeutils.utcnow(), 'locked': False, 'availability_zone': 'fake_az', 'uuid': uuids.vol_instance}) volumes = {} old_volume_id = uuidutils.generate_uuid() volumes[old_volume_id] = {'id': old_volume_id, 'display_name': 'old_volume', 'attach_status': 'attached', 'size': 5, 'status': 'in-use', 'multiattach': False, 'attachments': {uuids.vol_instance: { 'attachment_id': 'fakeid' } } } new_volume_id = uuidutils.generate_uuid() volumes[new_volume_id] = {'id': new_volume_id, 'display_name': 'new_volume', 'attach_status': 'detached', 'size': 5, 'status': 'available', 'multiattach': False} self.assertRaises(exception.InstanceInvalidState, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) instance['vm_state'] = vm_states.ACTIVE instance['task_state'] = None # Should fail if old volume is not attached volumes[old_volume_id]['attach_status'] = 'detached' self.assertRaises(exception.VolumeUnattached, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') volumes[old_volume_id]['attach_status'] = 'attached' # Should fail if old volume's instance_uuid is not that of the instance volumes[old_volume_id]['attachments'] = {uuids.vol_instance_2: {'attachment_id': 'fakeid'}} self.assertRaises(exception.InvalidVolume, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') volumes[old_volume_id]['attachments'] = 
{uuids.vol_instance: {'attachment_id': 'fakeid'}} # Should fail if new volume is attached volumes[new_volume_id]['attach_status'] = 'attached' self.assertRaises(exception.InvalidVolume, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') volumes[new_volume_id]['attach_status'] = 'detached' # Should fail if new volume is smaller than the old volume volumes[new_volume_id]['size'] = 4 self.assertRaises(exception.InvalidVolume, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') volumes[new_volume_id]['size'] = 5 # Fail call to swap_volume self.stubs.Set(self.compute_api.volume_api, 'begin_detaching', fake_vol_api_begin_detaching) self.stubs.Set(self.compute_api.volume_api, 'roll_detaching', fake_vol_api_roll_detaching) self.stubs.Set(self.compute_api.volume_api, 'reserve_volume', fake_vol_api_reserve) self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume', fake_vol_api_unreserve) self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume', fake_swap_volume_exc) self.assertRaises(AttributeError, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') # Should succeed self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume', lambda c, instance, old_volume_id, new_volume_id: True) self.compute_api.swap_volume(self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) def _test_snapshot_and_backup(self, is_snapshot=True, with_base_ref=False, min_ram=None, min_disk=None, create_fails=False, instance_vm_state=vm_states.ACTIVE): params = dict(locked=True) instance = self._create_instance_obj(params=params) instance.vm_state = instance_vm_state # 'cache_in_nova' is for testing non-inheritable properties # 'user_id' should also not be carried from sys_meta into # image property...since it should be set explicitly by # _create_image() in compute api. 
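# Put differently: when building snapshot/backup image metadata the API
# must drop image properties that are not meant to be inherited
# (cache_in_nova in the fake metadata below) and must set user_id from
# the requesting context rather than from the base image.  Minimal sketch
# of that filtering idea ('non_inheritable' is an illustrative name here,
# not the actual config-option handling in nova):
#
#     non_inheritable = set(['cache_in_nova'])
#     properties = dict((k, v) for k, v in base_properties.items()
#                       if k not in non_inheritable)
#     properties['user_id'] = context.user_id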
fake_image_meta = { 'is_public': True, 'name': 'base-name', 'disk_format': 'fake', 'container_format': 'fake', 'properties': { 'user_id': 'meow', 'foo': 'bar', 'blah': 'bug?', 'cache_in_nova': 'dropped', }, } image_type = is_snapshot and 'snapshot' or 'backup' sent_meta = { 'is_public': False, 'name': 'fake-name', 'disk_format': 'fake', 'container_format': 'fake', 'properties': { 'user_id': self.context.user_id, 'instance_uuid': instance.uuid, 'image_type': image_type, 'foo': 'bar', 'blah': 'bug?', 'cow': 'moo', 'cat': 'meow', }, } if is_snapshot: if min_ram is not None: fake_image_meta['min_ram'] = min_ram sent_meta['min_ram'] = min_ram if min_disk is not None: fake_image_meta['min_disk'] = min_disk sent_meta['min_disk'] = min_disk sent_meta.pop('disk_format', None) sent_meta.pop('container_format', None) else: sent_meta['properties']['backup_type'] = 'fake-backup-type' extra_props = dict(cow='moo', cat='meow') self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(self.compute_api.image_api, 'create') self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'snapshot_instance') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'backup_instance') if not is_snapshot: self.mox.StubOutWithMock(self.compute_api, 'is_volume_backed_instance') self.compute_api.is_volume_backed_instance(self.context, instance).AndReturn(False) utils.get_image_from_system_metadata( instance.system_metadata).AndReturn(fake_image_meta) fake_image = dict(id='fake-image-id') mock_method = self.compute_api.image_api.create( self.context, sent_meta) if create_fails: mock_method.AndRaise(test.TestingException()) else: mock_method.AndReturn(fake_image) def check_state(expected_task_state=None): expected_state = (is_snapshot and task_states.IMAGE_SNAPSHOT_PENDING or task_states.IMAGE_BACKUP) self.assertEqual(expected_state, instance.task_state) if not create_fails: instance.save(expected_task_state=[None]).WithSideEffects( check_state) if is_snapshot: self.compute_api.compute_rpcapi.snapshot_instance( self.context, instance, fake_image['id']) else: self.compute_api.compute_rpcapi.backup_instance( self.context, instance, fake_image['id'], 'fake-backup-type', 'fake-rotation') self.mox.ReplayAll() got_exc = False try: if is_snapshot: res = self.compute_api.snapshot(self.context, instance, 'fake-name', extra_properties=extra_props) else: res = self.compute_api.backup(self.context, instance, 'fake-name', 'fake-backup-type', 'fake-rotation', extra_properties=extra_props) self.assertEqual(fake_image, res) except test.TestingException: got_exc = True self.assertEqual(create_fails, got_exc) self.mox.UnsetStubs() def test_snapshot(self): self._test_snapshot_and_backup() def test_snapshot_fails(self): self._test_snapshot_and_backup(create_fails=True) def test_snapshot_invalid_state(self): instance = self._create_instance_obj() instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_SNAPSHOT self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name') instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_BACKUP self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name') instance.vm_state = vm_states.BUILDING instance.task_state = None self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name') def test_snapshot_with_base_image_ref(self): 
self._test_snapshot_and_backup(with_base_ref=True) def test_snapshot_min_ram(self): self._test_snapshot_and_backup(min_ram=42) def test_snapshot_min_disk(self): self._test_snapshot_and_backup(min_disk=42) def test_backup(self): for state in [vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED]: self._test_snapshot_and_backup(is_snapshot=False, instance_vm_state=state) def test_backup_fails(self): self._test_snapshot_and_backup(is_snapshot=False, create_fails=True) def test_backup_invalid_state(self): instance = self._create_instance_obj() instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_SNAPSHOT self.assertRaises(exception.InstanceInvalidState, self.compute_api.backup, self.context, instance, 'fake-name', 'fake', 'fake') instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_BACKUP self.assertRaises(exception.InstanceInvalidState, self.compute_api.backup, self.context, instance, 'fake-name', 'fake', 'fake') instance.vm_state = vm_states.BUILDING instance.task_state = None self.assertRaises(exception.InstanceInvalidState, self.compute_api.backup, self.context, instance, 'fake-name', 'fake', 'fake') def test_backup_with_base_image_ref(self): self._test_snapshot_and_backup(is_snapshot=False, with_base_ref=True) def test_backup_volume_backed_instance(self): instance = self._create_instance_obj() with mock.patch.object(self.compute_api, 'is_volume_backed_instance', return_value=True) as mock_is_volume_backed: self.assertRaises(exception.InvalidRequest, self.compute_api.backup, self.context, instance, 'fake-name', 'weekly', 3, extra_properties={}) mock_is_volume_backed.assert_called_once_with(self.context, instance) def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails, vm_state=vm_states.ACTIVE): fake_sys_meta = {'image_min_ram': '11', 'image_min_disk': '22', 'image_container_format': 'ami', 'image_disk_format': 'ami', 'image_ram_disk': 'fake_ram_disk_id', 'image_bdm_v2': 'True', 'image_block_device_mapping': '[]', 'image_mappings': '[]', 'image_cache_in_nova': 'True'} if quiesce_required: fake_sys_meta['image_os_require_quiesce'] = 'yes' params = dict(locked=True, vm_state=vm_state, system_metadata=fake_sys_meta) instance = self._create_instance_obj(params=params) instance['root_device_name'] = 'vda' instance_bdms = [] expect_meta = { 'name': 'test-snapshot', 'properties': {'root_device_name': 'vda', 'ram_disk': 'fake_ram_disk_id'}, 'size': 0, 'min_disk': '22', 'is_public': False, 'min_ram': '11', } if quiesce_required: expect_meta['properties']['os_require_quiesce'] = 'yes' quiesced = [False, False] quiesce_expected = not quiesce_fails and vm_state == vm_states.ACTIVE def fake_get_all_by_instance(context, instance, use_slave=False): return copy.deepcopy(instance_bdms) def fake_image_create(context, image_meta, data=None): self.assertThat(image_meta, matchers.DictMatches(expect_meta)) def fake_volume_get(context, volume_id): return {'id': volume_id, 'display_description': ''} def fake_volume_create_snapshot(context, volume_id, name, description): return {'id': '%s-snapshot' % volume_id} def fake_quiesce_instance(context, instance): if quiesce_fails: raise exception.InstanceQuiesceNotSupported( instance_id=instance['uuid'], reason='test') quiesced[0] = True def fake_unquiesce_instance(context, instance, mapping=None): quiesced[1] = True self.stub_out('nova.db.block_device_mapping_get_all_by_instance', fake_get_all_by_instance) self.stubs.Set(self.compute_api.image_api, 'create', fake_image_create) 
self.stubs.Set(self.compute_api.volume_api, 'get', fake_volume_get) self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force', fake_volume_create_snapshot) self.stubs.Set(self.compute_api.compute_rpcapi, 'quiesce_instance', fake_quiesce_instance) self.stubs.Set(self.compute_api.compute_rpcapi, 'unquiesce_instance', fake_unquiesce_instance) fake_image.stub_out_image_service(self) # No block devices defined self.compute_api.snapshot_volume_backed( self.context, instance, 'test-snapshot') bdm = fake_block_device.FakeDbBlockDeviceDict( {'no_device': False, 'volume_id': '1', 'boot_index': 0, 'connection_info': 'inf', 'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume'}) instance_bdms.append(bdm) expect_meta['properties']['bdm_v2'] = True expect_meta['properties']['block_device_mapping'] = [] expect_meta['properties']['block_device_mapping'].append( {'guest_format': None, 'boot_index': 0, 'no_device': None, 'image_id': None, 'volume_id': None, 'disk_bus': None, 'volume_size': None, 'source_type': 'snapshot', 'device_type': None, 'snapshot_id': '1-snapshot', 'device_name': '/dev/vda', 'destination_type': 'volume', 'delete_on_termination': False}) # All the db_only fields and the volume ones are removed self.compute_api.snapshot_volume_backed( self.context, instance, 'test-snapshot') self.assertEqual(quiesce_expected, quiesced[0]) self.assertEqual(quiesce_expected, quiesced[1]) instance.system_metadata['image_mappings'] = jsonutils.dumps( [{'virtual': 'ami', 'device': 'vda'}, {'device': 'vda', 'virtual': 'ephemeral0'}, {'device': 'vdb', 'virtual': 'swap'}, {'device': 'vdc', 'virtual': 'ephemeral1'}])[:255] instance.system_metadata['image_block_device_mapping'] = ( jsonutils.dumps( [{'source_type': 'snapshot', 'destination_type': 'volume', 'guest_format': None, 'device_type': 'disk', 'boot_index': 1, 'disk_bus': 'ide', 'device_name': '/dev/vdf', 'delete_on_termination': True, 'snapshot_id': 'snapshot-2', 'volume_id': None, 'volume_size': 100, 'image_id': None, 'no_device': None}])[:255]) bdm = fake_block_device.FakeDbBlockDeviceDict( {'no_device': False, 'volume_id': None, 'boot_index': -1, 'connection_info': 'inf', 'device_name': '/dev/vdh', 'source_type': 'blank', 'destination_type': 'local', 'guest_format': 'swap', 'delete_on_termination': True}) instance_bdms.append(bdm) expect_meta['properties']['block_device_mapping'].append( {'guest_format': 'swap', 'boot_index': -1, 'no_device': False, 'image_id': None, 'volume_id': None, 'disk_bus': None, 'volume_size': None, 'source_type': 'blank', 'device_type': None, 'snapshot_id': None, 'device_name': '/dev/vdh', 'destination_type': 'local', 'delete_on_termination': True}) quiesced = [False, False] # Check that the mappgins from the image properties are not included self.compute_api.snapshot_volume_backed( self.context, instance, 'test-snapshot') self.assertEqual(quiesce_expected, quiesced[0]) self.assertEqual(quiesce_expected, quiesced[1]) def test_snapshot_volume_backed(self): self._test_snapshot_volume_backed(False, False) def test_snapshot_volume_backed_with_quiesce(self): self._test_snapshot_volume_backed(True, False) def test_snapshot_volume_backed_with_quiesce_skipped(self): self._test_snapshot_volume_backed(False, True) def test_snapshot_volume_backed_with_quiesce_exception(self): self.assertRaises(exception.NovaException, self._test_snapshot_volume_backed, True, True) def test_snapshot_volume_backed_with_quiesce_stopped(self): self._test_snapshot_volume_backed(True, True, vm_state=vm_states.STOPPED) def 
test_volume_snapshot_create(self): volume_id = '1' create_info = {'id': 'eyedee'} fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'device_name': '/dev/sda2', 'source_type': 'volume', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'volume_id': 1, 'boot_index': -1}) fake_bdm['instance'] = fake_instance.fake_db_instance() fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid'] fake_bdm = objects.BlockDeviceMapping._from_db_object( self.context, objects.BlockDeviceMapping(), fake_bdm, expected_attrs=['instance']) self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'get_by_volume') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'volume_snapshot_create') objects.BlockDeviceMapping.get_by_volume( self.context, volume_id, expected_attrs=['instance']).AndReturn(fake_bdm) self.compute_api.compute_rpcapi.volume_snapshot_create(self.context, fake_bdm['instance'], volume_id, create_info) self.mox.ReplayAll() snapshot = self.compute_api.volume_snapshot_create(self.context, volume_id, create_info) expected_snapshot = { 'snapshot': { 'id': create_info['id'], 'volumeId': volume_id, }, } self.assertEqual(snapshot, expected_snapshot) def test_volume_snapshot_delete(self): volume_id = '1' snapshot_id = '2' fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'device_name': '/dev/sda2', 'source_type': 'volume', 'destination_type': 'volume', 'connection_info': "{'fake': 'connection_info'}", 'volume_id': 1, 'boot_index': -1}) fake_bdm['instance'] = fake_instance.fake_db_instance() fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid'] fake_bdm = objects.BlockDeviceMapping._from_db_object( self.context, objects.BlockDeviceMapping(), fake_bdm, expected_attrs=['instance']) self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'get_by_volume') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'volume_snapshot_delete') objects.BlockDeviceMapping.get_by_volume( self.context, volume_id, expected_attrs=['instance']).AndReturn(fake_bdm) self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context, fake_bdm['instance'], volume_id, snapshot_id, {}) self.mox.ReplayAll() self.compute_api.volume_snapshot_delete(self.context, volume_id, snapshot_id, {}) def _test_boot_volume_bootable(self, is_bootable=False): def get_vol_data(*args, **kwargs): return {'bootable': is_bootable} block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': None, 'volume_id': '1', 'delete_on_termination': False, }] expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {}, 'size': 0, 'status': 'active'} with mock.patch.object(self.compute_api.volume_api, 'get', side_effect=get_vol_data): if not is_bootable: self.assertRaises(exception.InvalidBDMVolumeNotBootable, self.compute_api._get_bdm_image_metadata, self.context, block_device_mapping) else: meta = self.compute_api._get_bdm_image_metadata(self.context, block_device_mapping) self.assertEqual(expected_meta, meta) def test_boot_volume_non_bootable(self): self._test_boot_volume_bootable(False) def test_boot_volume_bootable(self): self._test_boot_volume_bootable(True) def test_boot_volume_basic_property(self): block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': None, 'volume_id': '1', 'delete_on_termination': False, }] fake_volume = {"volume_image_metadata": {"min_ram": 256, "min_disk": 128, "foo": "bar"}} with mock.patch.object(self.compute_api.volume_api, 'get', return_value=fake_volume): meta 
= self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping) self.assertEqual(256, meta['min_ram']) self.assertEqual(128, meta['min_disk']) self.assertEqual('active', meta['status']) self.assertEqual('bar', meta['properties']['foo']) def test_boot_volume_snapshot_basic_property(self): block_device_mapping = [{ 'id': 1, 'device_name': 'vda', 'no_device': None, 'virtual_name': None, 'snapshot_id': '2', 'volume_id': None, 'delete_on_termination': False, }] fake_volume = {"volume_image_metadata": {"min_ram": 256, "min_disk": 128, "foo": "bar"}} fake_snapshot = {"volume_id": "1"} with test.nested( mock.patch.object(self.compute_api.volume_api, 'get', return_value=fake_volume), mock.patch.object(self.compute_api.volume_api, 'get_snapshot', return_value=fake_snapshot)) as ( volume_get, volume_get_snapshot): meta = self.compute_api._get_bdm_image_metadata( self.context, block_device_mapping) self.assertEqual(256, meta['min_ram']) self.assertEqual(128, meta['min_disk']) self.assertEqual('active', meta['status']) self.assertEqual('bar', meta['properties']['foo']) volume_get_snapshot.assert_called_once_with(self.context, block_device_mapping[0]['snapshot_id']) volume_get.assert_called_once_with(self.context, fake_snapshot['volume_id']) def _create_instance_with_disabled_disk_config(self, object=False): sys_meta = {"image_auto_disk_config": "Disabled"} params = {"system_metadata": sys_meta} instance = self._create_instance_obj(params=params) if object: return instance return obj_base.obj_to_primitive(instance) def _setup_fake_image_with_disabled_disk_config(self): self.fake_image = { 'id': 1, 'name': 'fake_name', 'status': 'active', 'properties': {"auto_disk_config": "Disabled"}, } def fake_show(obj, context, image_id, **kwargs): return self.fake_image fake_image.stub_out_image_service(self) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) return self.fake_image['id'] def test_resize_with_disabled_auto_disk_config_fails(self): fake_inst = self._create_instance_with_disabled_disk_config( object=True) self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.resize, self.context, fake_inst, auto_disk_config=True) def test_create_with_disabled_auto_disk_config_fails(self): image_id = self._setup_fake_image_with_disabled_disk_config() self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.create, self.context, "fake_flavor", image_id, auto_disk_config=True) def test_rebuild_with_disabled_auto_disk_config_fails(self): fake_inst = self._create_instance_with_disabled_disk_config( object=True) image_id = self._setup_fake_image_with_disabled_disk_config() self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.rebuild, self.context, fake_inst, image_id, "new password", auto_disk_config=True) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_get_image') @mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') @mock.patch.object(compute_api.API, '_record_action_start') def test_rebuild(self, _record_action_start, _checks_for_create_and_rebuild, _check_auto_disk_config, _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save): orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', 
launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, image_ref='foo', expected_attrs=['system_metadata']) get_flavor.return_value = test_flavor.fake_flavor flavor = instance.get_flavor() image_href = 'foo' image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': arch.X86_64}} admin_pass = '' files_to_inject = [] bdms = objects.BlockDeviceMappingList() _get_image.return_value = (None, image) bdm_get_by_instance_uuid.return_value = bdms with mock.patch.object(self.compute_api.compute_task_api, 'rebuild_instance') as rebuild_instance: self.compute_api.rebuild(self.context, instance, image_href, admin_pass, files_to_inject) rebuild_instance.assert_called_once_with(self.context, instance=instance, new_pass=admin_pass, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=image_href, orig_sys_metadata=orig_system_metadata, bdms=bdms, preserve_ephemeral=False, host=instance.host, kwargs={}) _check_auto_disk_config.assert_called_once_with(image=image) _checks_for_create_and_rebuild.assert_called_once_with(self.context, None, image, flavor, {}, [], None) self.assertNotEqual(orig_system_metadata, instance.system_metadata) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_get_image') @mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') @mock.patch.object(compute_api.API, '_record_action_start') def test_rebuild_change_image(self, _record_action_start, _checks_for_create_and_rebuild, _check_auto_disk_config, _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save): orig_system_metadata = {} get_flavor.return_value = test_flavor.fake_flavor orig_image_href = 'orig_image' orig_image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': arch.X86_64, 'vm_mode': 'hvm'}} new_image_href = 'new_image' new_image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': arch.X86_64, 'vm_mode': 'xen'}} admin_pass = '' files_to_inject = [] bdms = objects.BlockDeviceMappingList() instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata'], image_ref=orig_image_href, vm_mode=vm_mode.HVM) flavor = instance.get_flavor() def get_image(context, image_href): if image_href == new_image_href: return (None, new_image) if image_href == orig_image_href: return (None, orig_image) _get_image.side_effect = get_image bdm_get_by_instance_uuid.return_value = bdms with mock.patch.object(self.compute_api.compute_task_api, 'rebuild_instance') as rebuild_instance: self.compute_api.rebuild(self.context, instance, new_image_href, admin_pass, files_to_inject) rebuild_instance.assert_called_once_with(self.context, instance=instance, new_pass=admin_pass, injected_files=files_to_inject, image_ref=new_image_href, orig_image_ref=orig_image_href, orig_sys_metadata=orig_system_metadata, bdms=bdms, preserve_ephemeral=False, host=instance.host, kwargs={}) _check_auto_disk_config.assert_called_once_with(image=new_image) _checks_for_create_and_rebuild.assert_called_once_with(self.context, None, new_image, flavor, {}, [], None) self.assertEqual(vm_mode.XEN, instance.vm_mode) def _test_check_injected_file_quota_onset_file_limit_exceeded(self, side_effect): injected_files = [ { "path": 
"/etc/banner.txt", "contents": "foo" } ] with mock.patch.object(quota.QUOTAS, 'limit_check', side_effect=side_effect): self.compute_api._check_injected_file_quota( self.context, injected_files) def test_check_injected_file_quota_onset_file_limit_exceeded(self): # This is the first call to limit_check. side_effect = exception.OverQuota(overs='injected_files') self.assertRaises(exception.OnsetFileLimitExceeded, self._test_check_injected_file_quota_onset_file_limit_exceeded, side_effect) def test_check_injected_file_quota_onset_file_path_limit(self): # This is the second call to limit_check. side_effect = (mock.DEFAULT, exception.OverQuota(overs='injected_file_path_bytes')) self.assertRaises(exception.OnsetFilePathLimitExceeded, self._test_check_injected_file_quota_onset_file_limit_exceeded, side_effect) def test_check_injected_file_quota_onset_file_content_limit(self): # This is the second call to limit_check but with different overs. side_effect = (mock.DEFAULT, exception.OverQuota(overs='injected_file_content_bytes')) self.assertRaises(exception.OnsetFileContentLimitExceeded, self._test_check_injected_file_quota_onset_file_limit_exceeded, side_effect) @mock.patch('nova.objects.Quotas.commit') @mock.patch('nova.objects.Quotas.reserve') @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.InstanceAction.action_start') def test_restore_by_admin(self, action_start, instance_save, quota_reserve, quota_commit): admin_context = context.RequestContext('admin_user', 'admin_project', True) instance = self._create_instance_obj() instance.vm_state = vm_states.SOFT_DELETED instance.task_state = None instance.save() with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc: self.compute_api.restore(admin_context, instance) rpc.restore_instance.assert_called_once_with(admin_context, instance) self.assertEqual(instance.task_state, task_states.RESTORING) self.assertEqual(1, quota_commit.call_count) quota_reserve.assert_called_once_with(instances=1, cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb, project_id=instance.project_id, user_id=instance.user_id) @mock.patch('nova.objects.Quotas.commit') @mock.patch('nova.objects.Quotas.reserve') @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.InstanceAction.action_start') def test_restore_by_instance_owner(self, action_start, instance_save, quota_reserve, quota_commit): instance = self._create_instance_obj() instance.vm_state = vm_states.SOFT_DELETED instance.task_state = None instance.save() with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc: self.compute_api.restore(self.context, instance) rpc.restore_instance.assert_called_once_with(self.context, instance) self.assertEqual(instance.project_id, self.context.project_id) self.assertEqual(instance.task_state, task_states.RESTORING) self.assertEqual(1, quota_commit.call_count) quota_reserve.assert_called_once_with(instances=1, cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb, project_id=instance.project_id, user_id=instance.user_id) def test_external_instance_event(self): instances = [ objects.Instance(uuid=uuids.instance_1, host='host1'), objects.Instance(uuid=uuids.instance_2, host='host1'), objects.Instance(uuid=uuids.instance_3, host='host2'), ] events = [ objects.InstanceExternalEvent( instance_uuid=uuids.instance_1), objects.InstanceExternalEvent( instance_uuid=uuids.instance_2), objects.InstanceExternalEvent( instance_uuid=uuids.instance_3), ] self.compute_api.compute_rpcapi = mock.MagicMock() 
self.compute_api.external_instance_event(self.context, instances, events) method = self.compute_api.compute_rpcapi.external_instance_event method.assert_any_call(self.context, instances[0:2], events[0:2]) method.assert_any_call(self.context, instances[2:], events[2:]) self.assertEqual(2, method.call_count) def test_volume_ops_invalid_task_state(self): instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) instance.task_state = 'Any' volume_id = uuidutils.generate_uuid() self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_volume, self.context, instance, volume_id) self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_volume, self.context, instance, volume_id) new_volume_id = uuidutils.generate_uuid() self.assertRaises(exception.InstanceInvalidState, self.compute_api.swap_volume, self.context, instance, volume_id, new_volume_id) @mock.patch.object(cinder.API, 'get', side_effect=exception.CinderConnectionFailed(reason='error')) def test_get_bdm_image_metadata_with_cinder_down(self, mock_get): bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', }))] self.assertRaises(exception.CinderConnectionFailed, self.compute_api._get_bdm_image_metadata, self.context, bdms, legacy_bdm=True) @mock.patch.object(cinder.API, 'get') @mock.patch.object(cinder.API, 'check_attach', side_effect=exception.InvalidVolume(reason='error')) def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get): # Tests that an InvalidVolume exception raised from # volume_api.check_attach due to the volume status not being # 'available' results in _validate_bdm re-raising InvalidVolume. 
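# The guard being exercised is essentially cinder's attach precondition:
# a volume may only be attached when its status is 'available' and it is
# not already attached elsewhere.  Roughly (a sketch of the idea, not the
# exact cinder.API.check_attach code):
#
#     if volume['status'] != 'available':
#         raise exception.InvalidVolume(reason=...)
#     if volume['attach_status'] == 'attached':
#         raise exception.InvalidVolume(reason=...)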
instance = self._create_instance_obj() instance_type = self._create_flavor() volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8' volume_info = {'status': 'error', 'attach_status': 'detached', 'id': volume_id} mock_get.return_value = volume_info bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'boot_index': 0, 'volume_id': volume_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', }))] self.assertRaises(exception.InvalidVolume, self.compute_api._validate_bdm, self.context, instance, instance_type, bdms) mock_get.assert_called_once_with(self.context, volume_id) mock_check_attach.assert_called_once_with( self.context, volume_info, instance=instance) @mock.patch.object(cinder.API, 'get_snapshot', side_effect=exception.CinderConnectionFailed(reason='error')) @mock.patch.object(cinder.API, 'get', side_effect=exception.CinderConnectionFailed(reason='error')) def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot): instance = self._create_instance_obj() instance_type = self._create_flavor() bdm = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] bdms = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'snapshot_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] self.assertRaises(exception.CinderConnectionFailed, self.compute_api._validate_bdm, self.context, instance, instance_type, bdm) self.assertRaises(exception.CinderConnectionFailed, self.compute_api._validate_bdm, self.context, instance, instance_type, bdms) def _test_create_db_entry_for_new_instance_with_cinder_error(self, expected_exception): @mock.patch.object(objects.Instance, 'create') @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default') @mock.patch.object(compute_api.API, '_populate_instance_names') @mock.patch.object(compute_api.API, '_populate_instance_for_create') def do_test(self, mock_create, mock_names, mock_ensure, mock_inst_create): instance = self._create_instance_obj() instance['display_name'] = 'FAKE_DISPLAY_NAME' instance['shutdown_terminate'] = False instance_type = self._create_flavor() fake_image = { 'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} fake_security_group = None fake_num_instances = 1 fake_index = 1 bdm = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] with mock.patch.object(instance, "destroy") as destroy: self.assertRaises(expected_exception, self.compute_api. create_db_entry_for_new_instance, self.context, instance_type, fake_image, instance, fake_security_group, bdm, fake_num_instances, fake_index) destroy.assert_called_once_with() # We use a nested method so we can decorate with the mocks. 
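# The nested-helper trick works like this: decorating an inner function
# with mock.patch.object and calling it immediately keeps the patches
# scoped to that single call.  Bare-bones illustration with hypothetical
# names (SomeClass and do_work are not part of this test suite):
#
#     def test_something(self):
#         @mock.patch.object(SomeClass, 'do_work')
#         def do_test(mock_do_work):
#             mock_do_work.return_value = 42
#             # ... assertions ...
#         do_test()  # the mock is active only while do_test() runs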
do_test(self) @mock.patch.object(cinder.API, 'get', side_effect=exception.CinderConnectionFailed(reason='error')) def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get): self._test_create_db_entry_for_new_instance_with_cinder_error( expected_exception=exception.CinderConnectionFailed) @mock.patch.object(cinder.API, 'get', return_value={'id': 1, 'status': 'error', 'attach_status': 'detached'}) def test_create_db_entry_for_new_instancewith_error_volume(self, mock_get): self._test_create_db_entry_for_new_instance_with_cinder_error( expected_exception=exception.InvalidVolume) def test_provision_instances_creates_request_spec(self): @mock.patch.object(self.compute_api, '_check_num_instances_quota') @mock.patch.object(objects.Instance, 'create') @mock.patch.object(self.compute_api.security_group_api, 'ensure_default') @mock.patch.object(self.compute_api, '_validate_bdm') @mock.patch.object(self.compute_api, '_create_block_device_mapping') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(objects, 'BuildRequest') def do_test(_mock_build_req, mock_req_spec_from_components, _mock_create_bdm, _mock_validate_bdm, _mock_ensure_default, _mock_create, mock_check_num_inst_quota): quota_mock = mock.MagicMock() req_spec_mock = mock.MagicMock() mock_check_num_inst_quota.return_value = (1, quota_mock) mock_req_spec_from_components.return_value = req_spec_mock ctxt = context.RequestContext('fake-user', 'fake-project') flavor = self._create_flavor() min_count = max_count = 1 boot_meta = { 'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} base_options = {'image_ref': 'fake-ref', 'display_name': 'fake-name', 'project_id': 'fake-project', 'availability_zone': None, 'metadata': {}, 'access_ip_v4': None, 'access_ip_v6': None, 'config_drive': None, 'key_name': None, 'numa_topology': None, 'pci_requests': None} security_groups = {} block_device_mapping = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] shutdown_terminate = True instance_group = None check_server_group_quota = False filter_properties = {'scheduler_hints': None, 'instance_type': flavor} instances = self.compute_api._provision_instances(ctxt, flavor, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, instance_group, check_server_group_quota, filter_properties) self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid)) mock_req_spec_from_components.assert_called_once_with(ctxt, mock.ANY, boot_meta, flavor, base_options['numa_topology'], base_options['pci_requests'], filter_properties, instance_group, base_options['availability_zone']) req_spec_mock.create.assert_called_once_with() do_test() def test_provision_instances_creates_destroys_build_request(self): @mock.patch.object(self.compute_api, '_check_num_instances_quota') @mock.patch.object(objects.Instance, 'create') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(self.compute_api.security_group_api, 'ensure_default') @mock.patch.object(self.compute_api, '_validate_bdm') @mock.patch.object(self.compute_api, '_create_block_device_mapping') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(objects, 'BuildRequest') def do_test(mock_build_req, mock_req_spec_from_components, _mock_create_bdm, _mock_validate_bdm, _mock_ensure_default, _mock_inst_create, 
_mock_inst_save, mock_check_num_inst_quota): quota_mock = mock.MagicMock() req_spec_mock = mock.MagicMock() build_req_mock = mock.MagicMock() mock_check_num_inst_quota.return_value = (2, quota_mock) mock_req_spec_from_components.return_value = req_spec_mock mock_build_req.return_value = build_req_mock ctxt = context.RequestContext('fake-user', 'fake-project') flavor = self._create_flavor() min_count = 1 max_count = 2 boot_meta = { 'id': 'fake-image-id', 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away'} base_options = {'image_ref': 'fake-ref', 'display_name': 'fake-name', 'project_id': 'fake-project', 'availability_zone': None, 'metadata': {}, 'access_ip_v4': None, 'access_ip_v6': None, 'config_drive': None, 'key_name': None, 'numa_topology': None, 'pci_requests': None} security_groups = {} block_device_mapping = [objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( { 'id': 1, 'volume_id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'vda', 'boot_index': 0, }))] shutdown_terminate = True instance_group = None check_server_group_quota = False filter_properties = {'scheduler_hints': None, 'instance_type': flavor} instances = self.compute_api._provision_instances(ctxt, flavor, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, instance_group, check_server_group_quota, filter_properties) self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid)) display_names = ['fake-name-1', 'fake-name-2'] build_req_calls = [ mock.call(ctxt, request_spec=req_spec_mock, project_id=ctxt.project_id, user_id=ctxt.user_id, display_name=display_names[0], instance_metadata=base_options['metadata'], progress=0, vm_state=vm_states.BUILDING, task_state=task_states.SCHEDULING, image_ref=base_options['image_ref'], access_ip_v4=base_options['access_ip_v4'], access_ip_v6=base_options['access_ip_v6'], info_cache=mock.ANY, security_groups=mock.ANY, config_drive=False, key_name=base_options['config_drive'], locked_by=None), mock.call().create(), mock.call().destroy(), mock.call(ctxt, request_spec=req_spec_mock, project_id=ctxt.project_id, user_id=ctxt.user_id, display_name=display_names[1], instance_metadata=base_options['metadata'], progress=0, vm_state=vm_states.BUILDING, task_state=task_states.SCHEDULING, image_ref=base_options['image_ref'], access_ip_v4=base_options['access_ip_v4'], access_ip_v6=base_options['access_ip_v6'], info_cache=mock.ANY, security_groups=mock.ANY, config_drive=False, key_name=base_options['config_drive'], locked_by=None), mock.call().create(), mock.call().destroy() ] mock_build_req.assert_has_calls(build_req_calls) do_test() def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None, rescue_image=None, clean_shutdown=True): instance = self._create_instance_obj(params={'vm_state': vm_state}) bdms = [] with test.nested( mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=bdms), mock.patch.object(self.compute_api, 'is_volume_backed_instance', return_value=False), mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'rescue_instance') ) as ( bdm_get_by_instance_uuid, volume_backed_inst, instance_save, record_action_start, rpcapi_rescue_instance ): self.compute_api.rescue(self.context, instance, rescue_password=rescue_password, rescue_image_ref=rescue_image, clean_shutdown=clean_shutdown) # assert field values set on the instance 
object self.assertEqual(task_states.RESCUING, instance.task_state) # assert our mock calls bdm_get_by_instance_uuid.assert_called_once_with( self.context, instance.uuid) volume_backed_inst.assert_called_once_with( self.context, instance, bdms) instance_save.assert_called_once_with(expected_task_state=[None]) record_action_start.assert_called_once_with( self.context, instance, instance_actions.RESCUE) rpcapi_rescue_instance.assert_called_once_with( self.context, instance=instance, rescue_password=rescue_password, rescue_image_ref=rescue_image, clean_shutdown=clean_shutdown) def test_rescue_active(self): self._test_rescue() def test_rescue_stopped(self): self._test_rescue(vm_state=vm_states.STOPPED) def test_rescue_error(self): self._test_rescue(vm_state=vm_states.ERROR) def test_rescue_with_password(self): self._test_rescue(rescue_password='fake-password') def test_rescue_with_image(self): self._test_rescue(rescue_image='fake-image') def test_rescue_forced_shutdown(self): self._test_rescue(clean_shutdown=False) def test_unrescue(self): instance = self._create_instance_obj( params={'vm_state': vm_states.RESCUED}) with test.nested( mock.patch.object(instance, 'save'), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'unrescue_instance') ) as ( instance_save, record_action_start, rpcapi_unrescue_instance ): self.compute_api.unrescue(self.context, instance) # assert field values set on the instance object self.assertEqual(task_states.UNRESCUING, instance.task_state) # assert our mock calls instance_save.assert_called_once_with(expected_task_state=[None]) record_action_start.assert_called_once_with( self.context, instance, instance_actions.UNRESCUE) rpcapi_unrescue_instance.assert_called_once_with( self.context, instance=instance) def test_set_admin_password_invalid_state(self): # Tests that InstanceInvalidState is raised when not ACTIVE. instance = self._create_instance_obj({'vm_state': vm_states.STOPPED}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.set_admin_password, self.context, instance) def test_set_admin_password(self): # Ensure instance can have its admin password set. 
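# Like pause/unpause above, set_admin_password is a thin wrapper: it
# guards on vm_state, saves the instance with the expected task state,
# records the action and casts to the compute RPC API.  In outline
# (illustrative, not copied from nova.compute.api):
#
#     instance.task_state = task_states.UPDATING_PASSWORD
#     instance.save(expected_task_state=[None])
#     self._record_action_start(context, instance,
#                               instance_actions.CHANGE_PASSWORD)
#     self.compute_rpcapi.set_admin_password(context, instance=instance,
#                                            new_pass=password)
#
# The assertions in do_test() below mirror that sequence.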
instance = self._create_instance_obj() @mock.patch.object(objects.Instance, 'save') @mock.patch.object(self.compute_api, '_record_action_start') @mock.patch.object(self.compute_api.compute_rpcapi, 'set_admin_password') def do_test(compute_rpcapi_mock, record_mock, instance_save_mock): # call the API self.compute_api.set_admin_password(self.context, instance) # make our assertions instance_save_mock.assert_called_once_with( expected_task_state=[None]) record_mock.assert_called_once_with( self.context, instance, instance_actions.CHANGE_PASSWORD) compute_rpcapi_mock.assert_called_once_with( self.context, instance=instance, new_pass=None) do_test() def _test_attach_interface_invalid_state(self, state): instance = self._create_instance_obj( params={'vm_state': state}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.attach_interface, self.context, instance, '', '', '', []) def test_attach_interface_invalid_state(self): for state in [vm_states.BUILDING, vm_states.DELETED, vm_states.ERROR, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SUSPENDED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]: self._test_attach_interface_invalid_state(state) def _test_detach_interface_invalid_state(self, state): instance = self._create_instance_obj( params={'vm_state': state}) self.assertRaises(exception.InstanceInvalidState, self.compute_api.detach_interface, self.context, instance, '', '', '', []) def test_detach_interface_invalid_state(self): for state in [vm_states.BUILDING, vm_states.DELETED, vm_states.ERROR, vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SUSPENDED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]: self._test_detach_interface_invalid_state(state) def _test_check_and_transform_bdm(self, block_device_mapping): instance_type = self._create_flavor() base_options = {'uuid': uuids.bdm_instance, 'image_ref': 'fake_image_ref', 'metadata': {}} image_meta = {'status': 'active', 'name': 'image_name', 'deleted': False, 'container_format': 'bare', 'id': 'image_id'} legacy_bdm = False block_device_mapping = block_device_mapping self.assertRaises(exception.InvalidRequest, self.compute_api._check_and_transform_bdm, self.context, base_options, instance_type, image_meta, 1, 1, block_device_mapping, legacy_bdm) def test_check_and_transform_bdm_source_volume(self): block_device_mapping = [{'boot_index': 0, 'device_name': None, 'image_id': 'image_id', 'source_type': 'image'}, {'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'device_type': None, 'volume_id': 'volume_id'}] self._test_check_and_transform_bdm(block_device_mapping) def test_check_and_transform_bdm_source_snapshot(self): block_device_mapping = [{'boot_index': 0, 'device_name': None, 'image_id': 'image_id', 'source_type': 'image'}, {'device_name': '/dev/vda', 'source_type': 'snapshot', 'destination_type': 'volume', 'device_type': None, 'volume_id': 'volume_id'}] self._test_check_and_transform_bdm(block_device_mapping) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.InstanceAction, 'action_start') @mock.patch.object(compute_rpcapi.ComputeAPI, 'pause_instance') @mock.patch.object(objects.Instance, 'get_by_uuid') @mock.patch.object(compute_api.API, '_get_instances_by_filters', return_value=[]) @mock.patch.object(compute_api.API, '_create_instance') def test_skip_policy_check(self, mock_create, mock_get_ins_by_filters, mock_get, mock_pause, mock_action, mock_save): policy.reset() rules = {'compute:pause': '!', 'compute:get': '!', 
'compute:get_all': '!', 'compute:create': '!'} policy.set_rules(oslo_policy.Rules.from_dict(rules)) instance = self._create_instance_obj() mock_get.return_value = instance self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.pause, self.context, instance) api = compute_api.API(skip_policy_check=True) api.pause(self.context, instance) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get, self.context, instance.uuid) api = compute_api.API(skip_policy_check=True) api.get(self.context, instance.uuid) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.get_all, self.context) api = compute_api.API(skip_policy_check=True) api.get_all(self.context) self.assertRaises(exception.PolicyNotAuthorized, self.compute_api.create, self.context, None, None) api = compute_api.API(skip_policy_check=True) api.create(self.context, None, None) @mock.patch.object(compute_api.API, '_get_instances_by_filters') def test_tenant_to_project_conversion(self, mock_get): mock_get.return_value = [] api = compute_api.API() api.get_all(self.context, search_opts={'tenant_id': 'foo'}) filters = mock_get.call_args_list[0][0][1] self.assertEqual({'project_id': 'foo'}, filters) def test_metadata_invalid_return_empty_object(self): api = compute_api.API() ret = api.get_all(self.context, want_objects=True, search_opts={'metadata': 'foo'}) self.assertIsInstance(ret, objects.InstanceList) self.assertEqual(0, len(ret)) def test_metadata_invalid_return_empty_list(self): api = compute_api.API() ret = api.get_all(self.context, want_objects=False, search_opts={'metadata': 'foo'}) self.assertIsInstance(ret, list) self.assertEqual(0, len(ret)) def test_populate_instance_names_host_name(self): params = dict(display_name="vm1") instance = self._create_instance_obj(params=params) self.compute_api._populate_instance_names(instance, 1) self.assertEqual('vm1', instance.hostname) def test_populate_instance_names_host_name_is_empty(self): params = dict(display_name=u'\u865a\u62df\u673a\u662f\u4e2d\u6587') instance = self._create_instance_obj(params=params) self.compute_api._populate_instance_names(instance, 1) self.assertEqual('Server-%s' % instance.uuid, instance.hostname) def test_populate_instance_names_host_name_multi(self): params = dict(display_name="vm") instance = self._create_instance_obj(params=params) with mock.patch.object(instance, 'save'): self.compute_api._apply_instance_name_template(self.context, instance, 1) self.assertEqual('vm-2', instance.hostname) def test_populate_instance_names_host_name_is_empty_multi(self): params = dict(display_name=u'\u865a\u62df\u673a\u662f\u4e2d\u6587') instance = self._create_instance_obj(params=params) with mock.patch.object(instance, 'save'): self.compute_api._apply_instance_name_template(self.context, instance, 1) self.assertEqual('Server-%s' % instance.uuid, instance.hostname) def test_host_statuses(self): instances = [ objects.Instance(uuid=uuids.instance_1, host='host1', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host1', disabled=True, forced_down=True, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_2, host='host2', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host2', disabled=True, forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_3, host='host3', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host3', disabled=False, last_seen_up=timeutils.utcnow() - 
datetime.timedelta(minutes=5), forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_4, host='host4', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host4', disabled=False, last_seen_up=timeutils.utcnow(), forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_5, host='host5', services= objects.ServiceList()), objects.Instance(uuid=uuids.instance_6, host=None, services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host6', disabled=True, forced_down=False, binary='nova-compute'))), objects.Instance(uuid=uuids.instance_7, host='host2', services= self._obj_to_list_obj(objects.ServiceList( self.context), objects.Service(id=0, host='host2', disabled=True, forced_down=False, binary='nova-compute'))) ] host_statuses = self.compute_api.get_instances_host_statuses( instances) expect_statuses = {uuids.instance_1: fields_obj.HostStatus.DOWN, uuids.instance_2: fields_obj.HostStatus.MAINTENANCE, uuids.instance_3: fields_obj.HostStatus.UNKNOWN, uuids.instance_4: fields_obj.HostStatus.UP, uuids.instance_5: fields_obj.HostStatus.NONE, uuids.instance_6: fields_obj.HostStatus.NONE, uuids.instance_7: fields_obj.HostStatus.MAINTENANCE} for instance in instances: self.assertEqual(expect_statuses[instance.uuid], host_statuses[instance.uuid]) @mock.patch.object(objects.Migration, 'get_by_id_and_instance') @mock.patch.object(objects.InstanceAction, 'action_start') def test_live_migrate_force_complete_succeeded( self, action_start, get_by_id_and_instance): if self.cell_type == 'api': # cell api has not been implemented. return rpcapi = self.compute_api.compute_rpcapi instance = self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = objects.Migration() migration.id = 0 migration.status = 'running' get_by_id_and_instance.return_value = migration with mock.patch.object( rpcapi, 'live_migration_force_complete') as lm_force_complete: self.compute_api.live_migrate_force_complete( self.context, instance, migration.id) lm_force_complete.assert_called_once_with(self.context, instance, 0) action_start.assert_called_once_with( self.context, instance.uuid, 'live_migration_force_complete', want_result=False) @mock.patch.object(objects.Migration, 'get_by_id_and_instance') def test_live_migrate_force_complete_invalid_migration_state( self, get_by_id_and_instance): instance = self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = objects.Migration() migration.id = 0 migration.status = 'error' get_by_id_and_instance.return_value = migration self.assertRaises(exception.InvalidMigrationState, self.compute_api.live_migrate_force_complete, self.context, instance, migration.id) def test_live_migrate_force_complete_invalid_vm_state(self): instance = self._create_instance_obj() instance.task_state = None self.assertRaises(exception.InstanceInvalidState, self.compute_api.live_migrate_force_complete, self.context, instance, '1') def _get_migration(self, migration_id, status, migration_type): migration = objects.Migration() migration.id = migration_id migration.status = status migration.migration_type = migration_type return migration @mock.patch('nova.compute.api.API._record_action_start') @mock.patch.object(compute_rpcapi.ComputeAPI, 'live_migration_abort') @mock.patch.object(objects.Migration, 'get_by_id_and_instance') def test_live_migrate_abort_succeeded(self, mock_get_migration, mock_lm_abort, mock_rec_action): instance = 
self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = self._get_migration(21, 'running', 'live-migration') mock_get_migration.return_value = migration self.compute_api.live_migrate_abort(self.context, instance, migration.id) mock_rec_action.assert_called_once_with(self.context, instance, instance_actions.LIVE_MIGRATION_CANCEL) mock_lm_abort.called_once_with(self.context, instance, migration.id) @mock.patch.object(objects.Migration, 'get_by_id_and_instance') def test_live_migration_abort_wrong_migration_status(self, mock_get_migration): instance = self._create_instance_obj() instance.task_state = task_states.MIGRATING migration = self._get_migration(21, 'completed', 'live-migration') mock_get_migration.return_value = migration self.assertRaises(exception.InvalidMigrationState, self.compute_api.live_migrate_abort, self.context, instance, migration.id) class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeAPIUnitTestCase, self).setUp() self.compute_api = compute_api.API() self.cell_type = None def test_resize_same_flavor_fails(self): self.assertRaises(exception.CannotResizeToSameFlavor, self._test_resize, same_flavor=True) class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeAPIAPICellUnitTestCase, self).setUp() self.flags(cell_type='api', enable=True, group='cells') self.compute_api = compute_cells_api.ComputeCellsAPI() self.cell_type = 'api' def test_resize_same_flavor_fails(self): self.assertRaises(exception.CannotResizeToSameFlavor, self._test_resize, same_flavor=True) @mock.patch.object(compute_cells_api, 'ComputeRPCAPIRedirect') def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve): instance = self._create_instance_obj() # In the cells rpcapi there isn't the call for the # reserve_block_device_name so the volume_bdm returned # by the _create_volume_bdm is None result = self.compute_api._create_volume_bdm(self.context, instance, 'vda', '1', None, None) self.assertIsNone(result, None) @mock.patch.object(compute_cells_api.ComputeCellsAPI, '_call_to_cells') def test_attach_volume(self, mock_attach): instance = self._create_instance_obj() volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol', None, None, None, None, None) mock_volume_api = mock.patch.object(self.compute_api, 'volume_api', mock.MagicMock(spec=cinder.API)) with mock_volume_api as mock_v_api: mock_v_api.get.return_value = volume self.compute_api.attach_volume( self.context, instance, volume['id']) mock_v_api.check_attach.assert_called_once_with(self.context, volume, instance=instance) mock_attach.assert_called_once_with(self.context, instance, 'attach_volume', volume['id'], None, None, None) def test_attach_volume_reserve_fails(self): self.skipTest("Reserve is never done in the API cell.") def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False, clean_shutdown=True): params = dict(task_state=None, vm_state=vm_state, display_name='fake-name') instance = self._create_instance_obj(params=params) with mock.patch.object(self.compute_api, '_cast_to_cells') as cast_to_cells: self.compute_api.shelve(self.context, instance, clean_shutdown=clean_shutdown) cast_to_cells.assert_called_once_with(self.context, instance, 'shelve', clean_shutdown=clean_shutdown ) class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeAPIComputeCellUnitTestCase, self).setUp() self.flags(cell_type='compute', enable=True, 
group='cells') self.compute_api = compute_api.API() self.cell_type = 'compute' def test_resize_same_flavor_passes(self): self._test_resize(same_flavor=True) class DiffDictTestCase(test.NoDBTestCase): """Unit tests for _diff_dict().""" def test_no_change(self): old = dict(a=1, b=2, c=3) new = dict(a=1, b=2, c=3) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, {}) def test_new_key(self): old = dict(a=1, b=2, c=3) new = dict(a=1, b=2, c=3, d=4) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, dict(d=['+', 4])) def test_changed_key(self): old = dict(a=1, b=2, c=3) new = dict(a=1, b=4, c=3) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, dict(b=['+', 4])) def test_removed_key(self): old = dict(a=1, b=2, c=3) new = dict(a=1, c=3) diff = compute_api._diff_dict(old, new) self.assertEqual(diff, dict(b=['-'])) class SecurityGroupAPITest(test.NoDBTestCase): def setUp(self): super(SecurityGroupAPITest, self).setUp() self.secgroup_api = compute_api.SecurityGroupAPI() self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def test_get_instance_security_groups(self): groups = objects.SecurityGroupList() groups.objects = [objects.SecurityGroup(name='foo'), objects.SecurityGroup(name='bar')] instance = objects.Instance(security_groups=groups) names = self.secgroup_api.get_instance_security_groups(self.context, instance) self.assertEqual(sorted([{'name': 'bar'}, {'name': 'foo'}], key=str), sorted(names, key=str)) @mock.patch('nova.objects.security_group.make_secgroup_list') def test_populate_security_groups(self, mock_msl): r = self.secgroup_api.populate_security_groups([mock.sentinel.group]) mock_msl.assert_called_once_with([mock.sentinel.group]) self.assertEqual(r, mock_msl.return_value) nova-13.1.4/nova/tests/unit/compute/test_compute_cells.py0000664000567000056710000005037113064447152024727 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
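# Note on DiffDictTestCase above: it pins down the diff format that
# compute_api._diff_dict() is expected to produce. An added or changed key
# maps to ['+', new_value], a removed key maps to ['-'], and identical dicts
# yield {}. The following is a minimal standalone sketch of that contract,
# illustrative only, not nova's implementation:

def _diff_dict_sketch(old, new):
    """Return the per-key diff format exercised by DiffDictTestCase."""
    diff = {}
    for key, value in new.items():
        if key not in old or old[key] != value:
            diff[key] = ['+', value]
    for key in old:
        if key not in new:
            diff[key] = ['-']
    return diff

# For example, _diff_dict_sketch(dict(a=1, b=2, c=3), dict(a=1, b=4, c=3))
# returns {'b': ['+', 4]}, matching test_changed_key above.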
""" Tests For Compute w/ Cells """ import functools import inspect import mock from mox3 import mox from oslo_utils import timeutils from nova import block_device from nova.cells import manager from nova.compute import api as compute_api from nova.compute import cells_api as compute_cells_api from nova.compute import flavors from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova import objects from nova import quota from nova import test from nova.tests.unit.compute import test_compute from nova.tests.unit.compute import test_shelve from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_flavor from nova.tests import uuidsentinel as uuids ORIG_COMPUTE_API = None CONF = nova.conf.CONF def stub_call_to_cells(context, instance, method, *args, **kwargs): fn = getattr(ORIG_COMPUTE_API, method) original_instance = kwargs.pop('original_instance', None) if original_instance: instance = original_instance # Restore this in 'child cell DB' db.instance_update(context, instance['uuid'], dict(vm_state=instance['vm_state'], task_state=instance['task_state'])) # Use NoopQuotaDriver in child cells. saved_quotas = quota.QUOTAS quota.QUOTAS = quota.QuotaEngine( quota_driver_class=quota.NoopQuotaDriver()) compute_api.QUOTAS = quota.QUOTAS try: return fn(context, instance, *args, **kwargs) finally: quota.QUOTAS = saved_quotas compute_api.QUOTAS = saved_quotas def stub_cast_to_cells(context, instance, method, *args, **kwargs): fn = getattr(ORIG_COMPUTE_API, method) original_instance = kwargs.pop('original_instance', None) if original_instance: instance = original_instance # Restore this in 'child cell DB' db.instance_update(context, instance['uuid'], dict(vm_state=instance['vm_state'], task_state=instance['task_state'])) # Use NoopQuotaDriver in child cells. 
saved_quotas = quota.QUOTAS quota.QUOTAS = quota.QuotaEngine( quota_driver_class=quota.NoopQuotaDriver()) compute_api.QUOTAS = quota.QUOTAS try: fn(context, instance, *args, **kwargs) finally: quota.QUOTAS = saved_quotas compute_api.QUOTAS = saved_quotas def deploy_stubs(stubs, api, original_instance=None): call = stub_call_to_cells cast = stub_cast_to_cells if original_instance: kwargs = dict(original_instance=original_instance) call = functools.partial(stub_call_to_cells, **kwargs) cast = functools.partial(stub_cast_to_cells, **kwargs) stubs.Set(api, '_call_to_cells', call) stubs.Set(api, '_cast_to_cells', cast) class CellsComputeAPITestCase(test_compute.ComputeAPITestCase): def setUp(self): super(CellsComputeAPITestCase, self).setUp() global ORIG_COMPUTE_API ORIG_COMPUTE_API = self.compute_api self.flags(enable=True, group='cells') def _fake_validate_cell(*args, **kwargs): return self.compute_api = compute_cells_api.ComputeCellsAPI() self.stubs.Set(self.compute_api, '_validate_cell', _fake_validate_cell) deploy_stubs(self.stubs, self.compute_api) def tearDown(self): global ORIG_COMPUTE_API self.compute_api = ORIG_COMPUTE_API super(CellsComputeAPITestCase, self).tearDown() def test_instance_metadata(self): self.skipTest("Test is incompatible with cells.") def test_evacuate(self): @mock.patch.object(compute_api.API, 'evacuate') def _test(mock_evacuate): instance = objects.Instance(uuid=uuids.evacuate_instance, cell_name='fake_cell_name') dest_host = 'fake_cell_name@fakenode2' self.compute_api.evacuate(self.context, instance, host=dest_host) mock_evacuate.assert_called_once_with( self.context, instance, 'fakenode2') _test() def test_error_evacuate(self): self.skipTest("Test is incompatible with cells.") def _test_delete_instance_no_cell(self, method_name): cells_rpcapi = self.compute_api.cells_rpcapi self.mox.StubOutWithMock(cells_rpcapi, 'instance_delete_everywhere') self.mox.StubOutWithMock(compute_api.API, '_local_delete') inst = self._create_fake_instance_obj() delete_type = method_name == 'soft_delete' and 'soft' or 'hard' cells_rpcapi.instance_delete_everywhere(self.context, inst, delete_type) compute_api.API._local_delete(self.context, inst, mox.IsA(objects.BlockDeviceMappingList), method_name, mox.IgnoreArg()) self.mox.ReplayAll() self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance', lambda *a, **kw: None) getattr(self.compute_api, method_name)(self.context, inst) def test_delete_instance_no_cell_constraint_failure_does_not_loop(self): with mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere'): inst = self._create_fake_instance_obj() inst.cell_name = None inst.destroy = mock.MagicMock() inst.destroy.side_effect = exception.ObjectActionError(action='', reason='') inst.refresh = mock.MagicMock() self.assertRaises(exception.ObjectActionError, self.compute_api.delete, self.context, inst) inst.destroy.assert_called_once_with() def test_delete_instance_no_cell_constraint_failure_corrects_itself(self): def add_cell_name(context, instance, delete_type): instance.cell_name = 'fake_cell_name' @mock.patch.object(compute_api.API, 'delete') @mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere', side_effect=add_cell_name) def _test(mock_delete_everywhere, mock_compute_delete): inst = self._create_fake_instance_obj() inst.cell_name = None inst.destroy = mock.MagicMock() inst.destroy.side_effect = exception.ObjectActionError(action='', reason='') inst.refresh = mock.MagicMock() self.compute_api.delete(self.context, inst) 
inst.destroy.assert_called_once_with() mock_compute_delete.assert_called_once_with(self.context, inst) _test() def test_delete_instance_no_cell_destroy_fails_already_deleted(self): # If the instance.destroy() is reached during _local_delete, # it will raise ObjectActionError if the instance has already # been deleted by a instance_destroy_at_top, and instance.refresh() # will raise InstanceNotFound instance = objects.Instance(uuid=uuids.destroy_instance, cell_name=None) actionerror = exception.ObjectActionError(action='destroy', reason='') notfound = exception.InstanceNotFound(instance_id=instance.uuid) @mock.patch.object(compute_api.API, 'delete') @mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere') @mock.patch.object(compute_api.API, '_local_delete', side_effect=actionerror) @mock.patch.object(instance, 'refresh', side_effect=notfound) def _test(mock_refresh, mock_local_delete, mock_delete_everywhere, mock_compute_delete): self.compute_api.delete(self.context, instance) mock_delete_everywhere.assert_called_once_with(self.context, instance, 'hard') mock_local_delete.assert_called_once_with(self.context, instance, mock.ANY, 'delete', self.compute_api._do_delete) mock_refresh.assert_called_once_with() self.assertFalse(mock_compute_delete.called) _test() def test_delete_instance_no_cell_instance_not_found_already_deleted(self): # If anything in _local_delete accesses the instance causing a db # lookup before instance.destroy() is reached, if the instance has # already been deleted by a instance_destroy_at_top, # InstanceNotFound will be raised instance = objects.Instance(uuid=uuids.delete_instance, cell_name=None) notfound = exception.InstanceNotFound(instance_id=instance.uuid) @mock.patch.object(compute_api.API, 'delete') @mock.patch.object(self.compute_api.cells_rpcapi, 'instance_delete_everywhere') @mock.patch.object(compute_api.API, '_local_delete', side_effect=notfound) def _test(mock_local_delete, mock_delete_everywhere, mock_compute_delete): self.compute_api.delete(self.context, instance) mock_delete_everywhere.assert_called_once_with(self.context, instance, 'hard') mock_local_delete.assert_called_once_with(self.context, instance, mock.ANY, 'delete', self.compute_api._do_delete) self.assertFalse(mock_compute_delete.called) _test() def test_soft_delete_instance_no_cell(self): self._test_delete_instance_no_cell('soft_delete') def test_delete_instance_no_cell(self): self._test_delete_instance_no_cell('delete') def test_force_delete_instance_no_cell(self): self._test_delete_instance_no_cell('force_delete') def test_get_migrations(self): filters = {'cell_name': 'ChildCell', 'status': 'confirmed'} migrations = {'migrations': [{'id': 1234}]} cells_rpcapi = self.compute_api.cells_rpcapi self.mox.StubOutWithMock(cells_rpcapi, 'get_migrations') cells_rpcapi.get_migrations(self.context, filters).AndReturn(migrations) self.mox.ReplayAll() response = self.compute_api.get_migrations(self.context, filters) self.assertEqual(migrations, response) def test_create_block_device_mapping(self): instance_type = {'swap': 1, 'ephemeral_gb': 1} instance = self._create_fake_instance_obj() bdms = [block_device.BlockDeviceDict({'source_type': 'image', 'destination_type': 'local', 'image_id': 'fake-image', 'boot_index': 0})] self.compute_api._create_block_device_mapping( instance_type, instance.uuid, bdms) bdms = db.block_device_mapping_get_all_by_instance( self.context, instance['uuid']) self.assertEqual(0, len(bdms)) def test_create_bdm_from_flavor(self): self.skipTest("Test is 
incompatible with cells.") @mock.patch('nova.cells.messaging._TargetedMessage') def test_rebuild_sig(self, mock_msg): # TODO(belliott) Cells could benefit from better testing to ensure API # and manager signatures stay up to date def wire(version): # wire the rpc cast directly to the manager method to make sure # the signature matches cells_mgr = manager.CellsManager() def cast(context, method, *args, **kwargs): fn = getattr(cells_mgr, method) fn(context, *args, **kwargs) cells_mgr.cast = cast return cells_mgr cells_rpcapi = self.compute_api.cells_rpcapi client = cells_rpcapi.client with mock.patch.object(client, 'prepare', side_effect=wire): inst = self._create_fake_instance_obj() inst.cell_name = 'mycell' cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None, None, None, None, None, recreate=False, on_shared_storage=False, host='host', preserve_ephemeral=True, kwargs=None) # one targeted message should have been created self.assertEqual(1, mock_msg.call_count) class CellsShelveComputeAPITestCase(test_shelve.ShelveComputeAPITestCase): def setUp(self): super(CellsShelveComputeAPITestCase, self).setUp() global ORIG_COMPUTE_API ORIG_COMPUTE_API = self.compute_api self.compute_api = compute_cells_api.ComputeCellsAPI() def _fake_validate_cell(*args, **kwargs): return def _fake_cast_to_cells(self, context, instance, method, *args, **kwargs): fn = getattr(ORIG_COMPUTE_API, method) fn(context, instance, *args, **kwargs) self.stub_out('nova.compute.api.API._validate_cell', _fake_validate_cell) self.stub_out('nova.compute.cells_api.ComputeCellsAPI._cast_to_cells', _fake_cast_to_cells) def test_unshelve(self): # Ensure instance can be unshelved on cell environment. # The super class tests nova-shelve. instance = self._create_fake_instance_obj() self.assertIsNone(instance['task_state']) self.compute_api.shelve(self.context, instance) instance.task_state = None instance.vm_state = vm_states.SHELVED instance.save() self.compute_api.unshelve(self.context, instance) self.assertEqual(task_states.UNSHELVING, instance.task_state) def tearDown(self): global ORIG_COMPUTE_API self.compute_api = ORIG_COMPUTE_API super(CellsShelveComputeAPITestCase, self).tearDown() class CellsConductorAPIRPCRedirect(test.NoDBTestCase): def setUp(self): super(CellsConductorAPIRPCRedirect, self).setUp() self.compute_api = compute_cells_api.ComputeCellsAPI() self.cells_rpcapi = mock.MagicMock() self.compute_api.compute_task_api.cells_rpcapi = self.cells_rpcapi self.context = context.RequestContext('fake', 'fake') @mock.patch.object(compute_api.API, '_record_action_start') @mock.patch.object(compute_api.API, '_provision_instances') @mock.patch.object(compute_api.API, '_check_and_transform_bdm') @mock.patch.object(compute_api.API, '_get_image') @mock.patch.object(compute_api.API, '_validate_and_build_base_options') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') def test_build_instances(self, _checks_for_create_and_rebuild, _validate, _get_image, _check_bdm, _provision, _record_action_start): _get_image.return_value = (None, 'fake-image') _validate.return_value = ({}, 1) _check_bdm.return_value = objects.BlockDeviceMappingList() _provision.return_value = 'instances' self.compute_api.create(self.context, 'fake-flavor', 'fake-image') # Subsequent tests in class are verifying the hooking. We don't check # args since this is verified in compute test code. 
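# Note on the wire() helper in test_rebuild_sig above: routing the rpc cast
# straight to the manager method means a drifted signature fails with a
# TypeError instead of passing silently. The following is a minimal
# standalone illustration of that idea; the manager and method names are
# made up for the example, they are not nova's:

class _FakeManager(object):
    def rebuild_instance(self, context, instance, new_pass):
        return (context, instance, new_pass)

def _wire_cast(manager):
    def cast(context, method, *args, **kwargs):
        # Dispatch directly so argument mismatches surface in the test.
        return getattr(manager, method)(context, *args, **kwargs)
    return cast

# _wire_cast(_FakeManager())('ctxt', 'rebuild_instance', 'inst', new_pass='pw')
# succeeds, while an extra or misspelled keyword raises TypeError immediately.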
self.assertTrue(self.cells_rpcapi.build_instances.called) @mock.patch.object(compute_api.API, '_record_action_start') @mock.patch.object(compute_api.API, '_resize_cells_support') @mock.patch.object(compute_utils, 'reserve_quota_delta') @mock.patch.object(compute_utils, 'upsize_quota_delta') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(flavors, 'extract_flavor') @mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def test_resize_instance(self, _bdms, _check, _extract, _save, _upsize, _reserve, _cells, _record): flavor = objects.Flavor(**test_flavor.fake_flavor) _extract.return_value = flavor orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata']) instance.flavor = flavor instance.old_flavor = instance.new_flavor = None self.compute_api.resize(self.context, instance) self.assertTrue(self.cells_rpcapi.resize_instance.called) @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_record_action_start') @mock.patch.object(objects.Instance, 'save') def test_live_migrate_instance(self, instance_save, _record, _get_spec): orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata']) self.compute_api.live_migrate(self.context, instance, True, True, 'fake_dest_host') self.assertTrue(self.cells_rpcapi.live_migrate_instance.called) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(compute_api.API, '_get_image') @mock.patch.object(compute_api.API, '_check_auto_disk_config') @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') @mock.patch.object(compute_api.API, '_record_action_start') def test_rebuild_instance(self, _record_action_start, _checks_for_create_and_rebuild, _check_auto_disk_config, _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save): orig_system_metadata = {} instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', launched_at=timeutils.utcnow(), system_metadata=orig_system_metadata, expected_attrs=['system_metadata']) get_flavor.return_value = '' image_href = '' image = {"min_ram": 10, "min_disk": 1, "properties": {'architecture': 'x86_64'}} admin_pass = '' files_to_inject = [] bdms = objects.BlockDeviceMappingList() _get_image.return_value = (None, image) bdm_get_by_instance_uuid.return_value = bdms self.compute_api.rebuild(self.context, instance, image_href, admin_pass, files_to_inject) self.assertTrue(self.cells_rpcapi.rebuild_instance.called) def test_check_equal(self): task_api = self.compute_api.compute_task_api tests = set() for (name, value) in inspect.getmembers(self, inspect.ismethod): if name.startswith('test_') and name != 'test_check_equal': tests.add(name[5:]) if tests != set(task_api.cells_compatible): self.fail("Testcases not equivalent to cells_compatible list") class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase): def setUp(self): super(CellsComputePolicyTestCase, self).setUp() global ORIG_COMPUTE_API ORIG_COMPUTE_API = self.compute_api 
self.compute_api = compute_cells_api.ComputeCellsAPI() deploy_stubs(self.stubs, self.compute_api) def tearDown(self): global ORIG_COMPUTE_API self.compute_api = ORIG_COMPUTE_API super(CellsComputePolicyTestCase, self).tearDown() nova-13.1.4/nova/tests/unit/compute/test_resources.py0000664000567000056710000002143713064447152024104 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the compute extra resources framework.""" from oslo_config import cfg from stevedore import extension from stevedore import named from nova.compute import resources from nova.compute.resources import base from nova import context from nova.objects import flavor as flavor_obj from nova import test CONF = cfg.CONF class FakeResourceHandler(resources.ResourceHandler): def __init__(self, extensions): self._mgr = \ named.NamedExtensionManager.make_test_instance(extensions) class FakeResource(base.Resource): def __init__(self): self.total_res = 0 self.used_res = 0 def _get_requested(self, usage): if 'extra_specs' not in usage: return if self.resource_name not in usage['extra_specs']: return req = usage['extra_specs'][self.resource_name] return int(req) def _get_limit(self, limits): if self.resource_name not in limits: return limit = limits[self.resource_name] return int(limit) def reset(self, resources, driver): self.total_res = 0 self.used_res = 0 def test(self, usage, limits): requested = self._get_requested(usage) if not requested: return limit = self._get_limit(limits) if not limit: return free = limit - self.used_res if requested <= free: return else: return ('Free %(free)d < requested %(requested)d ' % {'free': free, 'requested': requested}) def add_instance(self, usage): requested = self._get_requested(usage) if requested: self.used_res += requested def remove_instance(self, usage): requested = self._get_requested(usage) if requested: self.used_res -= requested def write(self, resources): pass def report_free(self): return "Free %s" % (self.total_res - self.used_res) class ResourceA(FakeResource): def reset(self, resources, driver): # ResourceA uses a configuration option self.total_res = int(CONF.resA) self.used_res = 0 self.resource_name = 'resource:resA' def write(self, resources): resources['resA'] = self.total_res resources['used_resA'] = self.used_res class ResourceB(FakeResource): def reset(self, resources, driver): # ResourceB uses resource details passed in parameter resources self.total_res = resources['resB'] self.used_res = 0 self.resource_name = 'resource:resB' def write(self, resources): resources['resB'] = self.total_res resources['used_resB'] = self.used_res def fake_flavor_obj(**updates): flavor = flavor_obj.Flavor() flavor.id = 1 flavor.name = 'fakeflavor' flavor.memory_mb = 8000 flavor.vcpus = 3 flavor.root_gb = 11 flavor.ephemeral_gb = 4 flavor.swap = 0 flavor.rxtx_factor = 1.0 flavor.vcpu_weight = 1 if updates: flavor.update(updates) return flavor class 
BaseTestCase(test.NoDBTestCase): def _initialize_used_res_counter(self): # Initialize the value for the used resource for ext in self.r_handler._mgr.extensions: ext.obj.used_res = 0 def setUp(self): super(BaseTestCase, self).setUp() # initialize flavors and stub get_by_id to # get flavors from here self._flavors = {} self.ctxt = context.get_admin_context() # Create a flavor without extra_specs defined _flavor_id = 1 _flavor = fake_flavor_obj(id=_flavor_id) self._flavors[_flavor_id] = _flavor # Create a flavor with extra_specs defined _flavor_id = 2 requested_resA = 5 requested_resB = 7 requested_resC = 7 _extra_specs = {'resource:resA': requested_resA, 'resource:resB': requested_resB, 'resource:resC': requested_resC} _flavor = fake_flavor_obj(id=_flavor_id, extra_specs=_extra_specs) self._flavors[_flavor_id] = _flavor # create fake resource extensions and resource handler _extensions = [ extension.Extension('resA', None, ResourceA, ResourceA()), extension.Extension('resB', None, ResourceB, ResourceB()), ] self.r_handler = FakeResourceHandler(_extensions) # Resources details can be passed to each plugin or can be specified as # configuration options driver_resources = {'resB': 5} CONF.resA = '10' # initialise the resources self.r_handler.reset_resources(driver_resources, None) def test_update_from_instance_with_extra_specs(self): # Flavor with extra_specs _flavor_id = 2 sign = 1 self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA'] expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB'] self.assertEqual(int(expected_resA), self.r_handler._mgr['resA'].obj.used_res) self.assertEqual(int(expected_resB), self.r_handler._mgr['resB'].obj.used_res) def test_update_from_instance_without_extra_specs(self): # Flavor id without extra spec _flavor_id = 1 self._initialize_used_res_counter() self.r_handler.resource_list = [] sign = 1 self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res) self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res) def test_write_resources(self): self._initialize_used_res_counter() extra_resources = {} expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0} self.r_handler.write_resources(extra_resources) self.assertEqual(expected, extra_resources) def test_test_resources_without_extra_specs(self): limits = {} # Flavor id without extra_specs flavor = self._flavors[1] result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_test_resources_with_limits_for_different_resource(self): limits = {'resource:resC': 20} # Flavor id with extra_specs flavor = self._flavors[2] result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_passing_test_resources(self): limits = {'resource:resA': 10, 'resource:resB': 20} # Flavor id with extra_specs flavor = self._flavors[2] self._initialize_used_res_counter() result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_failing_test_resources_for_single_resource(self): limits = {'resource:resA': 4, 'resource:resB': 20} # Flavor id with extra_specs flavor = self._flavors[2] self._initialize_used_res_counter() result = self.r_handler.test_resources(flavor, limits) expected = ['Free 4 < requested 5 ', None] self.assertEqual(sorted(expected, key=str), sorted(result, key=str)) def test_empty_resource_handler(self): """An empty 
resource handler has no resource extensions, should have no effect, and should raise no exceptions. """ empty_r_handler = FakeResourceHandler([]) resources = {} empty_r_handler.reset_resources(resources, None) flavor = self._flavors[1] sign = 1 empty_r_handler.update_from_instance(flavor, sign) limits = {} test_result = empty_r_handler.test_resources(flavor, limits) self.assertEqual([], test_result) sign = -1 empty_r_handler.update_from_instance(flavor, sign) extra_resources = {} expected_extra_resources = extra_resources empty_r_handler.write_resources(extra_resources) self.assertEqual(expected_extra_resources, extra_resources) empty_r_handler.report_free_resources() nova-13.1.4/nova/tests/unit/compute/test_rpcapi.py0000664000567000056710000007537613064447152023363 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for nova.compute.rpcapi """ import mock from oslo_config import cfg from oslo_serialization import jsonutils from nova.compute import rpcapi as compute_rpcapi from nova import context from nova import exception from nova.objects import block_device as objects_block_dev from nova.objects import migrate_data as migrate_data_obj from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance CONF = cfg.CONF class ComputeRpcAPITestCase(test.NoDBTestCase): def setUp(self): super(ComputeRpcAPITestCase, self).setUp() self.context = context.get_admin_context() self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context) self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj) instance_attr = {'host': 'fake_host', 'instance_type_id': self.fake_flavor_obj['id'], 'instance_type': self.fake_flavor_obj} self.fake_instance_obj = fake_instance.fake_instance_obj(self.context, **instance_attr) self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj) self.fake_volume_bdm = objects_block_dev.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'instance_uuid': self.fake_instance_obj.uuid, 'volume_id': 'fake-volume-id'})) @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin(self, mock_get_min): mock_get_min.return_value = 1 self.flags(compute='auto', group='upgrade_levels') compute_rpcapi.LAST_VERSION = None rpcapi = compute_rpcapi.ComputeAPI() self.assertEqual('4.4', rpcapi.client.version_cap) mock_get_min.assert_called_once_with(mock.ANY, 'nova-compute') @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin_fails_if_too_old(self, mock_get_min): mock_get_min.return_value = 1955 self.flags(compute='auto', group='upgrade_levels') compute_rpcapi.LAST_VERSION = None self.assertRaises(exception.ServiceTooOld, compute_rpcapi.ComputeAPI) @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin_kilo(self, mock_get_min): mock_get_min.return_value = 0 self.flags(compute='auto', group='upgrade_levels') 
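# Note on the auto-pin tests around this point: they exercise the contract
# that with [upgrade_levels] compute=auto the client asks for the minimum
# nova-compute service version, maps it to an RPC version cap (0 -> '4.0',
# 1 -> '4.4' in these tests) and caches the result in LAST_VERSION. The
# following is a minimal standalone sketch of the happy-path lookup and
# caching only; the table and helper are illustrative, not nova's code:

_CAP_CACHE = {}

def _pick_version_cap(service_version, history):
    """Pick the newest RPC cap whose minimum service version is satisfied.

    ``history`` maps an RPC cap string to the minimum service version that
    understands it.
    """
    if service_version not in _CAP_CACHE:
        candidates = [cap for cap, minimum in history.items()
                      if minimum <= service_version]
        if not candidates:
            raise ValueError('no compatible RPC version for this deployment')
        _CAP_CACHE[service_version] = max(
            candidates, key=lambda cap: tuple(int(p) for p in cap.split('.')))
    return _CAP_CACHE[service_version]

# Example: _pick_version_cap(0, {'4.0': 0, '4.4': 1}) returns '4.0' and
# _pick_version_cap(1, {'4.0': 0, '4.4': 1}) returns '4.4'.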
compute_rpcapi.LAST_VERSION = None rpcapi = compute_rpcapi.ComputeAPI() self.assertEqual('4.0', rpcapi.client.version_cap) mock_get_min.assert_called_once_with(mock.ANY, 'nova-compute') @mock.patch('nova.objects.Service.get_minimum_version') def test_auto_pin_caches(self, mock_get_min): mock_get_min.return_value = 1 self.flags(compute='auto', group='upgrade_levels') compute_rpcapi.LAST_VERSION = None compute_rpcapi.ComputeAPI() compute_rpcapi.ComputeAPI() mock_get_min.assert_called_once_with(mock.ANY, 'nova-compute') self.assertEqual('4.4', compute_rpcapi.LAST_VERSION) def _test_compute_api(self, method, rpc_method, expected_args=None, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)() self.assertIsNotNone(rpcapi.client) self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic) orig_prepare = rpcapi.client.prepare base_version = rpcapi.client.target.version expected_version = kwargs.pop('version', base_version) expected_kwargs = kwargs.copy() if expected_args: expected_kwargs.update(expected_args) if 'host_param' in expected_kwargs: expected_kwargs['host'] = expected_kwargs.pop('host_param') else: expected_kwargs.pop('host', None) cast_and_call = ['confirm_resize', 'stop_instance'] if rpc_method == 'call' and method in cast_and_call: if method == 'confirm_resize': kwargs['cast'] = False else: kwargs['do_cast'] = False if 'host' in kwargs: host = kwargs['host'] elif 'instances' in kwargs: host = kwargs['instances'][0]['host'] else: host = kwargs['instance']['host'] if method == 'rebuild_instance' and 'node' in expected_kwargs: expected_kwargs['scheduled_node'] = expected_kwargs.pop('node') with test.nested( mock.patch.object(rpcapi.client, rpc_method), mock.patch.object(rpcapi.client, 'prepare'), mock.patch.object(rpcapi.client, 'can_send_version'), ) as ( rpc_mock, prepare_mock, csv_mock ): prepare_mock.return_value = rpcapi.client if '_return_value' in kwargs: rpc_mock.return_value = kwargs.pop('_return_value') del expected_kwargs['_return_value'] elif rpc_method == 'call': rpc_mock.return_value = 'foo' else: rpc_mock.return_value = None csv_mock.side_effect = ( lambda v: orig_prepare(version=v).can_send_version()) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, rpc_mock.return_value) prepare_mock.assert_called_once_with(version=expected_version, server=host) rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs) def test_add_aggregate_host(self): self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', slave_info={}) def test_add_fixed_ip_to_instance(self): self._test_compute_api('add_fixed_ip_to_instance', 'cast', instance=self.fake_instance_obj, network_id='id', version='4.0') def test_attach_interface(self): self._test_compute_api('attach_interface', 'call', instance=self.fake_instance_obj, network_id='id', port_id='id2', version='4.0', requested_ip='192.168.1.50') def test_attach_volume(self): self._test_compute_api('attach_volume', 'cast', instance=self.fake_instance_obj, bdm=self.fake_volume_bdm, version='4.0') def test_change_instance_metadata(self): self._test_compute_api('change_instance_metadata', 'cast', instance=self.fake_instance_obj, diff={}, version='4.0') def test_check_instance_shared_storage(self): self._test_compute_api('check_instance_shared_storage', 'call', instance=self.fake_instance_obj, data='foo', version='4.0') def test_confirm_resize_cast(self): 
self._test_compute_api('confirm_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'foo'}, host='host', reservations=list('fake_res')) def test_confirm_resize_call(self): self._test_compute_api('confirm_resize', 'call', instance=self.fake_instance_obj, migration={'id': 'foo'}, host='host', reservations=list('fake_res')) def test_detach_interface(self): self._test_compute_api('detach_interface', 'cast', version='4.0', instance=self.fake_instance_obj, port_id='fake_id') def test_detach_volume(self): self._test_compute_api('detach_volume', 'cast', instance=self.fake_instance_obj, volume_id='id', attachment_id='fake_id', version='4.7') def test_detach_volume_no_attachment_id(self): ctxt = context.RequestContext('fake_user', 'fake_project') instance = self.fake_instance_obj rpcapi = compute_rpcapi.ComputeAPI() cast_mock = mock.Mock() cctxt_mock = mock.Mock(cast=cast_mock) with test.nested( mock.patch.object(rpcapi.client, 'can_send_version', return_value=False), mock.patch.object(rpcapi.client, 'prepare', return_value=cctxt_mock) ) as ( can_send_mock, prepare_mock ): rpcapi.detach_volume(ctxt, instance=instance, volume_id='id', attachment_id='fake_id') # assert our mocks were called as expected can_send_mock.assert_called_once_with('4.7') prepare_mock.assert_called_once_with(server=instance['host'], version='4.0') cast_mock.assert_called_once_with(ctxt, 'detach_volume', instance=instance, volume_id='id') def test_finish_resize(self): self._test_compute_api('finish_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'foo'}, image='image', disk_info='disk_info', host='host', reservations=list('fake_res')) def test_finish_revert_resize(self): self._test_compute_api('finish_revert_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, host='host', reservations=list('fake_res')) def test_get_console_output(self): self._test_compute_api('get_console_output', 'call', instance=self.fake_instance_obj, tail_length='tl', version='4.0') def test_get_console_pool_info(self): self._test_compute_api('get_console_pool_info', 'call', console_type='type', host='host') def test_get_console_topic(self): self._test_compute_api('get_console_topic', 'call', host='host') def test_get_diagnostics(self): self._test_compute_api('get_diagnostics', 'call', instance=self.fake_instance_obj, version='4.0') def test_get_instance_diagnostics(self): expected_args = {'instance': self.fake_instance} self._test_compute_api('get_instance_diagnostics', 'call', expected_args, instance=self.fake_instance_obj, version='4.0') def test_get_vnc_console(self): self._test_compute_api('get_vnc_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') def test_get_spice_console(self): self._test_compute_api('get_spice_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') def test_get_rdp_console(self): self._test_compute_api('get_rdp_console', 'call', instance=self.fake_instance_obj, console_type='type', version='4.0') def test_get_serial_console(self): self._test_compute_api('get_serial_console', 'call', instance=self.fake_instance_obj, console_type='serial', version='4.0') def test_get_mks_console(self): self._test_compute_api('get_mks_console', 'call', instance=self.fake_instance_obj, console_type='webmks', version='4.3') def test_validate_console_port(self): self._test_compute_api('validate_console_port', 'call', instance=self.fake_instance_obj, port="5900", console_type="novnc", version='4.0') def 
test_host_maintenance_mode(self): self._test_compute_api('host_maintenance_mode', 'call', host_param='param', mode='mode', host='host') def test_host_power_action(self): self._test_compute_api('host_power_action', 'call', action='action', host='host') def test_inject_network_info(self): self._test_compute_api('inject_network_info', 'cast', instance=self.fake_instance_obj) def test_live_migration(self): self._test_compute_api('live_migration', 'cast', instance=self.fake_instance_obj, dest='dest', block_migration='blockity_block', host='tsoh', migration='migration', migrate_data={}, version='4.8') def test_live_migration_force_complete(self): self._test_compute_api('live_migration_force_complete', 'cast', instance=self.fake_instance_obj, migration_id='1', version='4.9') def test_live_migration_abort(self): self._test_compute_api('live_migration_abort', 'cast', instance=self.fake_instance_obj, migration_id='1', version='4.10') def test_post_live_migration_at_destination(self): self._test_compute_api('post_live_migration_at_destination', 'cast', instance=self.fake_instance_obj, block_migration='block_migration', host='host', version='4.0') def test_pause_instance(self): self._test_compute_api('pause_instance', 'cast', instance=self.fake_instance_obj) def test_soft_delete_instance(self): self._test_compute_api('soft_delete_instance', 'cast', instance=self.fake_instance_obj, reservations=['uuid1', 'uuid2']) def test_swap_volume(self): self._test_compute_api('swap_volume', 'cast', instance=self.fake_instance_obj, old_volume_id='oldid', new_volume_id='newid') def test_restore_instance(self): self._test_compute_api('restore_instance', 'cast', instance=self.fake_instance_obj, version='4.0') def test_pre_live_migration(self): self._test_compute_api('pre_live_migration', 'call', instance=self.fake_instance_obj, block_migration='block_migration', disk='disk', host='host', migrate_data=None, version='4.8') def test_prep_resize(self): self._test_compute_api('prep_resize', 'cast', instance=self.fake_instance_obj, instance_type=self.fake_flavor_obj, image='fake_image', host='host', reservations=list('fake_res'), request_spec='fake_spec', filter_properties={'fakeprop': 'fakeval'}, node='node', clean_shutdown=True, version='4.1') self.flags(compute='4.0', group='upgrade_levels') expected_args = {'instance_type': self.fake_flavor} self._test_compute_api('prep_resize', 'cast', expected_args, instance=self.fake_instance_obj, instance_type=self.fake_flavor_obj, image='fake_image', host='host', reservations=list('fake_res'), request_spec='fake_spec', filter_properties={'fakeprop': 'fakeval'}, node='node', clean_shutdown=True, version='4.0') def test_reboot_instance(self): self.maxDiff = None self._test_compute_api('reboot_instance', 'cast', instance=self.fake_instance_obj, block_device_info={}, reboot_type='type') def test_rebuild_instance(self): self._test_compute_api('rebuild_instance', 'cast', new_pass='None', injected_files='None', image_ref='None', orig_image_ref='None', bdms=[], instance=self.fake_instance_obj, host='new_host', orig_sys_metadata=None, recreate=True, on_shared_storage=True, preserve_ephemeral=True, migration=None, node=None, limits=None, version='4.5') def test_rebuild_instance_downgrade(self): self.flags(group='upgrade_levels', compute='4.0') self._test_compute_api('rebuild_instance', 'cast', new_pass='None', injected_files='None', image_ref='None', orig_image_ref='None', bdms=[], instance=self.fake_instance_obj, host='new_host', orig_sys_metadata=None, recreate=True, 
on_shared_storage=True, preserve_ephemeral=True, version='4.0') def test_reserve_block_device_name(self): self._test_compute_api('reserve_block_device_name', 'call', instance=self.fake_instance_obj, device='device', volume_id='id', disk_bus='ide', device_type='cdrom', version='4.0', _return_value=objects_block_dev.BlockDeviceMapping()) def test_refresh_instance_security_rules(self): expected_args = {'instance': self.fake_instance_obj} self._test_compute_api('refresh_instance_security_rules', 'cast', expected_args, host='fake_host', instance=self.fake_instance_obj, version='4.4') def test_remove_aggregate_host(self): self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', slave_info={}) def test_remove_fixed_ip_from_instance(self): self._test_compute_api('remove_fixed_ip_from_instance', 'cast', instance=self.fake_instance_obj, address='addr', version='4.0') def test_remove_volume_connection(self): self._test_compute_api('remove_volume_connection', 'call', instance=self.fake_instance_obj, volume_id='id', host='host', version='4.0') def test_rescue_instance(self): self._test_compute_api('rescue_instance', 'cast', instance=self.fake_instance_obj, rescue_password='pw', rescue_image_ref='fake_image_ref', clean_shutdown=True, version='4.0') def test_reset_network(self): self._test_compute_api('reset_network', 'cast', instance=self.fake_instance_obj) def test_resize_instance(self): self._test_compute_api('resize_instance', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, image='image', instance_type=self.fake_flavor_obj, reservations=list('fake_res'), clean_shutdown=True, version='4.1') self.flags(compute='4.0', group='upgrade_levels') expected_args = {'instance_type': self.fake_flavor} self._test_compute_api('resize_instance', 'cast', expected_args, instance=self.fake_instance_obj, migration={'id': 'fake_id'}, image='image', instance_type=self.fake_flavor_obj, reservations=list('fake_res'), clean_shutdown=True, version='4.0') def test_resume_instance(self): self._test_compute_api('resume_instance', 'cast', instance=self.fake_instance_obj) def test_revert_resize(self): self._test_compute_api('revert_resize', 'cast', instance=self.fake_instance_obj, migration={'id': 'fake_id'}, host='host', reservations=list('fake_res')) def test_set_admin_password(self): self._test_compute_api('set_admin_password', 'call', instance=self.fake_instance_obj, new_pass='pw', version='4.0') def test_set_host_enabled(self): self._test_compute_api('set_host_enabled', 'call', enabled='enabled', host='host') def test_get_host_uptime(self): self._test_compute_api('get_host_uptime', 'call', host='host') def test_backup_instance(self): self._test_compute_api('backup_instance', 'cast', instance=self.fake_instance_obj, image_id='id', backup_type='type', rotation='rotation') def test_snapshot_instance(self): self._test_compute_api('snapshot_instance', 'cast', instance=self.fake_instance_obj, image_id='id') def test_start_instance(self): self._test_compute_api('start_instance', 'cast', instance=self.fake_instance_obj) def test_stop_instance_cast(self): self._test_compute_api('stop_instance', 'cast', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') def test_stop_instance_call(self): self._test_compute_api('stop_instance', 'call', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') def test_suspend_instance(self): self._test_compute_api('suspend_instance', 'cast', instance=self.fake_instance_obj) def 
test_terminate_instance(self): self._test_compute_api('terminate_instance', 'cast', instance=self.fake_instance_obj, bdms=[], reservations=['uuid1', 'uuid2'], version='4.0') def test_unpause_instance(self): self._test_compute_api('unpause_instance', 'cast', instance=self.fake_instance_obj) def test_unrescue_instance(self): self._test_compute_api('unrescue_instance', 'cast', instance=self.fake_instance_obj, version='4.0') def test_shelve_instance(self): self._test_compute_api('shelve_instance', 'cast', instance=self.fake_instance_obj, image_id='image_id', clean_shutdown=True, version='4.0') def test_shelve_offload_instance(self): self._test_compute_api('shelve_offload_instance', 'cast', instance=self.fake_instance_obj, clean_shutdown=True, version='4.0') def test_unshelve_instance(self): self._test_compute_api('unshelve_instance', 'cast', instance=self.fake_instance_obj, host='host', image='image', filter_properties={'fakeprop': 'fakeval'}, node='node', version='4.0') def test_volume_snapshot_create(self): self._test_compute_api('volume_snapshot_create', 'cast', instance=self.fake_instance_obj, volume_id='fake_id', create_info={}, version='4.0') def test_volume_snapshot_delete(self): self._test_compute_api('volume_snapshot_delete', 'cast', instance=self.fake_instance_obj, volume_id='fake_id', snapshot_id='fake_id2', delete_info={}, version='4.0') def test_external_instance_event(self): self._test_compute_api('external_instance_event', 'cast', instances=[self.fake_instance_obj], events=['event'], version='4.0') def test_build_and_run_instance(self): self._test_compute_api('build_and_run_instance', 'cast', instance=self.fake_instance_obj, host='host', image='image', request_spec={'request': 'spec'}, filter_properties=[], admin_password='passwd', injected_files=None, requested_networks=['network1'], security_groups=None, block_device_mapping=None, node='node', limits=[], version='4.0') def test_quiesce_instance(self): self._test_compute_api('quiesce_instance', 'call', instance=self.fake_instance_obj, version='4.0') def test_unquiesce_instance(self): self._test_compute_api('unquiesce_instance', 'cast', instance=self.fake_instance_obj, mapping=None, version='4.0') def test_trigger_crash_dump(self): self._test_compute_api('trigger_crash_dump', 'cast', instance=self.fake_instance_obj, version='4.6') def test_trigger_crash_dump_incompatible(self): self.flags(compute='4.0', group='upgrade_levels') self.assertRaises(exception.TriggerCrashDumpNotSupported, self._test_compute_api, 'trigger_crash_dump', 'cast', instance=self.fake_instance_obj, version='4.6') def _test_simple_call(self, method, inargs, callargs, callret, calltype='call', can_send=False): rpc = compute_rpcapi.ComputeAPI() @mock.patch.object(rpc, 'client') @mock.patch.object(compute_rpcapi, '_compute_host') def _test(mock_ch, mock_client): mock_client.can_send_version.return_value = can_send call = getattr(mock_client.prepare.return_value, calltype) call.return_value = callret ctxt = mock.MagicMock() result = getattr(rpc, method)(ctxt, **inargs) call.assert_called_once_with(ctxt, method, **callargs) return result return _test() def test_check_can_live_migrate_source_converts_objects(self): obj = migrate_data_obj.LiveMigrateData() result = self._test_simple_call('check_can_live_migrate_source', inargs={'instance': 'foo', 'dest_check_data': obj}, callargs={'instance': 'foo', 'dest_check_data': {}}, callret=obj) self.assertEqual(obj, result) result = self._test_simple_call('check_can_live_migrate_source', inargs={'instance': 'foo', 
'dest_check_data': obj}, callargs={'instance': 'foo', 'dest_check_data': {}}, callret={'foo': 'bar'}) self.assertIsInstance(result, migrate_data_obj.LiveMigrateData) @mock.patch('nova.objects.migrate_data.LiveMigrateData.' 'detect_implementation') def test_check_can_live_migrate_destination_converts_dict(self, mock_det): result = self._test_simple_call('check_can_live_migrate_destination', inargs={'instance': 'foo', 'destination': 'bar', 'block_migration': False, 'disk_over_commit': False}, callargs={'instance': 'foo', 'block_migration': False, 'disk_over_commit': False}, callret={'foo': 'bar'}) self.assertEqual(mock_det.return_value, result) def test_live_migration_converts_objects(self): obj = migrate_data_obj.LiveMigrateData() self._test_simple_call('live_migration', inargs={'instance': 'foo', 'dest': 'foo', 'block_migration': False, 'host': 'foo', 'migration': None, 'migrate_data': obj}, callargs={'instance': 'foo', 'dest': 'foo', 'block_migration': False, 'migrate_data': { 'pre_live_migration_result': {}}}, callret=None, calltype='cast') @mock.patch('nova.objects.migrate_data.LiveMigrateData.from_legacy_dict') def test_pre_live_migration_converts_objects(self, mock_fld): obj = migrate_data_obj.LiveMigrateData() result = self._test_simple_call('pre_live_migration', inargs={'instance': 'foo', 'block_migration': False, 'disk': None, 'host': 'foo', 'migrate_data': obj}, callargs={'instance': 'foo', 'block_migration': False, 'disk': None, 'migrate_data': {}}, callret=obj) self.assertFalse(mock_fld.called) self.assertEqual(obj, result) result = self._test_simple_call('pre_live_migration', inargs={'instance': 'foo', 'block_migration': False, 'disk': None, 'host': 'foo', 'migrate_data': obj}, callargs={'instance': 'foo', 'block_migration': False, 'disk': None, 'migrate_data': {}}, callret={'foo': 'bar'}) mock_fld.assert_called_once_with( {'pre_live_migration_result': {'foo': 'bar'}}) self.assertIsInstance(result, migrate_data_obj.LiveMigrateData) def test_rollback_live_migration_at_destination_converts_objects(self): obj = migrate_data_obj.LiveMigrateData() method = 'rollback_live_migration_at_destination' self._test_simple_call(method, inargs={'instance': 'foo', 'host': 'foo', 'destroy_disks': False, 'migrate_data': obj}, callargs={'instance': 'foo', 'destroy_disks': False, 'migrate_data': {}}, callret=None, calltype='cast') def test_check_can_live_migrate_destination_old_compute(self): self.flags(compute='4.10', group='upgrade_levels') self.assertRaises(exception.LiveMigrationWithOldNovaNotSupported, self._test_compute_api, 'check_can_live_migrate_destination', 'call', instance=self.fake_instance_obj, block_migration=None, destination='dest', disk_over_commit=None, version='4.11') nova-13.1.4/nova/tests/unit/compute/test_claims.py0000664000567000056710000004060113064447152023334 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
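# Note on test_detach_volume_no_attachment_id above: it pins down the usual
# backward-compatibility idiom in the compute RPC client, which is to probe
# can_send_version() for the newest signature and, when the deployment is
# pinned lower, drop the newer argument and send the older version instead.
# The following is a minimal standalone sketch of that idiom; the wrapper is
# illustrative, not nova's implementation, and ``client`` stands for any
# object exposing can_send_version() and prepare():

class _CompatDetach(object):
    def __init__(self, client):
        self.client = client

    def detach_volume(self, ctxt, instance, volume_id, attachment_id=None):
        version = '4.7'
        kwargs = {'instance': instance, 'volume_id': volume_id,
                  'attachment_id': attachment_id}
        if not self.client.can_send_version(version):
            # Older computes do not know attachment_id; fall back to 4.0.
            version = '4.0'
            kwargs.pop('attachment_id')
        cctxt = self.client.prepare(server=instance['host'], version=version)
        cctxt.cast(ctxt, 'detach_volume', **kwargs)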
"""Tests for resource tracker claims.""" import uuid import mock from nova.compute import claims from nova import context from nova import exception from nova import objects from nova.pci import manager as pci_manager from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.pci import fakes as pci_fakes class FakeResourceHandler(object): test_called = False usage_is_instance = False def test_resources(self, usage, limits): self.test_called = True self.usage_is_itype = usage.get('name') == 'fakeitype' return [] class DummyTracker(object): icalled = False rcalled = False ext_resources_handler = FakeResourceHandler() def __init__(self): self.new_pci_tracker() def abort_instance_claim(self, *args, **kwargs): self.icalled = True def drop_move_claim(self, *args, **kwargs): self.rcalled = True def new_pci_tracker(self): ctxt = context.RequestContext('testuser', 'testproject') self.pci_tracker = pci_manager.PciDevTracker(ctxt) class ClaimTestCase(test.NoDBTestCase): def setUp(self): super(ClaimTestCase, self).setUp() self.context = context.RequestContext('fake-user', 'fake-project') self.resources = self._fake_resources() self.tracker = DummyTracker() self.empty_requests = objects.InstancePCIRequests( requests=[] ) def _claim(self, limits=None, overhead=None, requests=None, **kwargs): numa_topology = kwargs.pop('numa_topology', None) instance = self._fake_instance(**kwargs) if numa_topology: db_numa_topology = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'instance_uuid': instance.uuid, 'numa_topology': numa_topology._to_json(), 'pci_requests': (requests or self.empty_requests).to_json() } else: db_numa_topology = None if overhead is None: overhead = {'memory_mb': 0} requests = requests or self.empty_requests @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=db_numa_topology) def get_claim(mock_extra_get): return claims.Claim(self.context, instance, self.tracker, self.resources, requests, overhead=overhead, limits=limits) return get_claim() def _fake_instance(self, **kwargs): instance = { 'uuid': str(uuid.uuid1()), 'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 5, 'vcpus': 1, 'system_metadata': {}, 'numa_topology': None } instance.update(**kwargs) return fake_instance.fake_instance_obj(self.context, **instance) def _fake_instance_type(self, **kwargs): instance_type = { 'id': 1, 'name': 'fakeitype', 'memory_mb': 1, 'vcpus': 1, 'root_gb': 1, 'ephemeral_gb': 2 } instance_type.update(**kwargs) return objects.Flavor(**instance_type) def _fake_resources(self, values=None): resources = { 'memory_mb': 2048, 'memory_mb_used': 0, 'free_ram_mb': 2048, 'local_gb': 20, 'local_gb_used': 0, 'free_disk_gb': 20, 'vcpus': 2, 'vcpus_used': 0, 'numa_topology': objects.NUMATopology( cells=[objects.NUMACell(id=1, cpuset=set([1, 2]), memory=512, memory_usage=0, cpu_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([3, 4]), memory=512, memory_usage=0, cpu_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))] )._to_json() } if values: resources.update(values) return resources def test_memory_unlimited(self): self._claim(memory_mb=99999999) def test_disk_unlimited_root(self): self._claim(root_gb=999999) def test_disk_unlimited_ephemeral(self): self._claim(ephemeral_gb=999999) def test_memory_with_overhead(self): overhead = {'memory_mb': 8} limits = {'memory_mb': 2048} self._claim(memory_mb=2040, limits=limits, overhead=overhead) def test_memory_with_overhead_insufficient(self): overhead 
= {'memory_mb': 9} limits = {'memory_mb': 2048} self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, limits=limits, overhead=overhead, memory_mb=2040) def test_memory_oversubscription(self): self._claim(memory_mb=4096) def test_memory_insufficient(self): limits = {'memory_mb': 8192} self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, limits=limits, memory_mb=16384) def test_disk_oversubscription(self): limits = {'disk_gb': 60} self._claim(root_gb=10, ephemeral_gb=40, limits=limits) def test_disk_insufficient(self): limits = {'disk_gb': 45} self.assertRaisesRegex( exception.ComputeResourcesUnavailable, "disk", self._claim, limits=limits, root_gb=10, ephemeral_gb=40) def test_disk_and_memory_insufficient(self): limits = {'disk_gb': 45, 'memory_mb': 8192} self.assertRaisesRegex( exception.ComputeResourcesUnavailable, "memory.*disk", self._claim, limits=limits, root_gb=10, ephemeral_gb=40, memory_mb=16384) @mock.patch('nova.pci.stats.PciDeviceStats.support_requests', return_value=True) def test_pci_pass(self, mock_supports): request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) # Claim.__init__() would raise ComputeResourcesUnavailable # if Claim._test_pci() did not return None. self._claim(requests=requests) mock_supports.assert_called_once_with(requests.requests) @mock.patch('nova.pci.stats.PciDeviceStats.support_requests', return_value=False) def test_pci_fail(self, mock_supports): request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, requests=requests) mock_supports.assert_called_once_with(requests.requests) @mock.patch('nova.pci.stats.PciDeviceStats.support_requests', return_value=True) def test_pci_pass_no_requests(self, mock_supports): # Claim.__init__() would raise ComputeResourcesUnavailable # if Claim._test_pci() did not return None. 
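# With no PCI requests attached to the claim, support_requests() should never be consulted.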
self._claim() self.assertFalse(mock_supports.called) def test_ext_resources(self): self._claim() self.assertTrue(self.tracker.ext_resources_handler.test_called) self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype) def test_numa_topology_no_limit(self): huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self._claim(numa_topology=huge_instance) def test_numa_topology_fails(self): huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2, 3, 4, 5]), memory=2048)]) limit_topo = objects.NUMATopologyLimits( cpu_allocation_ratio=1, ram_allocation_ratio=1) self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, limits={'numa_topology': limit_topo}, numa_topology=huge_instance) def test_numa_topology_passes(self): huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) limit_topo = objects.NUMATopologyLimits( cpu_allocation_ratio=1, ram_allocation_ratio=1) self._claim(limits={'numa_topology': limit_topo}, numa_topology=huge_instance) @pci_fakes.patch_pci_whitelist @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') def test_numa_topology_with_pci(self, mock_get_by_instance): dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 1, 'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker._set_hvdevs([dev_dict]) request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) mock_get_by_instance.return_value = requests huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self._claim(requests=requests, numa_topology=huge_instance) @pci_fakes.patch_pci_whitelist @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') def test_numa_topology_with_pci_fail(self, mock_get_by_instance): dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 1, 'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} dev_dict2 = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': 2, 'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker._set_hvdevs([dev_dict, dev_dict2]) request = objects.InstancePCIRequest(count=2, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) mock_get_by_instance.return_value = requests huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self.assertRaises(exception.ComputeResourcesUnavailable, self._claim, requests=requests, numa_topology=huge_instance) @pci_fakes.patch_pci_whitelist @mock.patch('nova.objects.InstancePCIRequests.get_by_instance') def test_numa_topology_with_pci_no_numa_info(self, mock_get_by_instance): dev_dict = { 'compute_node_id': 1, 'address': 'a', 'product_id': 'p', 'vendor_id': 'v', 'numa_node': None, 'dev_type': 'type-PCI', 'parent_addr': 'a1', 'status': 'available'} self.tracker.new_pci_tracker() self.tracker.pci_tracker._set_hvdevs([dev_dict]) request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': 'v', 'product_id': 'p'}]) requests = objects.InstancePCIRequests(requests=[request]) 
mock_get_by_instance.return_value = requests huge_instance = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) self._claim(requests=requests, numa_topology=huge_instance) def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.icalled) def _abort(self): claim = None try: with self._claim(memory_mb=4096) as claim: raise test.TestingException("abort") except test.TestingException: pass return claim class MoveClaimTestCase(ClaimTestCase): def _claim(self, limits=None, overhead=None, requests=None, image_meta=None, **kwargs): instance_type = self._fake_instance_type(**kwargs) numa_topology = kwargs.pop('numa_topology', None) image_meta = image_meta or {} self.instance = self._fake_instance(**kwargs) self.instance.numa_topology = None if numa_topology: self.db_numa_topology = { 'id': 1, 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'instance_uuid': self.instance.uuid, 'numa_topology': numa_topology._to_json(), 'pci_requests': (requests or self.empty_requests).to_json() } else: self.db_numa_topology = None if overhead is None: overhead = {'memory_mb': 0} requests = requests or self.empty_requests @mock.patch('nova.virt.hardware.numa_get_constraints', return_value=numa_topology) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=self.db_numa_topology) def get_claim(mock_extra_get, mock_numa_get): return claims.MoveClaim(self.context, self.instance, instance_type, image_meta, self.tracker, self.resources, requests, overhead=overhead, limits=limits) return get_claim() def test_ext_resources(self): self._claim() self.assertTrue(self.tracker.ext_resources_handler.test_called) self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype) def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.rcalled) def test_create_migration_context(self): numa_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([1, 2]), memory=512)]) claim = self._claim(numa_topology=numa_topology) migration = objects.Migration(context=self.context, id=42) claim.migration = migration fake_mig_context = mock.Mock(spec=objects.MigrationContext) @mock.patch('nova.db.instance_extra_get_by_instance_uuid', return_value=None) @mock.patch('nova.objects.MigrationContext', return_value=fake_mig_context) def _test(ctxt_mock, mock_get_extra): claim.create_migration_context() ctxt_mock.assert_called_once_with( context=self.context, instance_uuid=self.instance.uuid, migration_id=42, old_numa_topology=None, new_numa_topology=mock.ANY) self.assertIsInstance(ctxt_mock.call_args[1]['new_numa_topology'], objects.InstanceNUMATopology) self.assertEqual(migration, claim.migration) _test() def test_image_meta(self): claim = self._claim() self.assertIsInstance(claim.image_meta, objects.ImageMeta) def test_image_meta_object_passed(self): image_meta = objects.ImageMeta() claim = self._claim(image_meta=image_meta) self.assertIsInstance(claim.image_meta, objects.ImageMeta) nova-13.1.4/nova/tests/unit/compute/test_compute_mgr.py0000664000567000056710000071062513064447152024417 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for ComputeManager().""" import datetime import time import uuid from cinderclient import exceptions as cinder_exception from eventlet import event as eventlet_event import mock from mox3 import mox import netaddr from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils import six import nova from nova.compute import build_results from nova.compute import manager from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova import context from nova import db from nova import exception from nova.network import api as network_api from nova.network import model as network_model from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import migrate_data as migrate_data_obj from nova import test from nova.tests import fixtures from nova.tests.unit.compute import fake_resource_tracker from nova.tests.unit import fake_block_device from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance from nova.tests.unit import fake_network from nova.tests.unit import fake_network_cache_model from nova.tests.unit import fake_server_actions from nova.tests.unit.objects import test_instance_fault from nova.tests.unit.objects import test_instance_info_cache from nova.tests import uuidsentinel as uuids from nova import utils from nova.virt import driver as virt_driver from nova.virt import event as virtevent from nova.virt import fake as fake_driver from nova.virt import hardware CONF = cfg.CONF CONF.import_opt('compute_manager', 'nova.service') class ComputeManagerUnitTestCase(test.NoDBTestCase): def setUp(self): super(ComputeManagerUnitTestCase, self).setUp() self.flags(use_local=True, group='conductor') self.compute = importutils.import_object(CONF.compute_manager) self.context = context.RequestContext('fake', 'fake') fake_server_actions.stub_out_action_events(self.stubs) self.useFixture(fixtures.SpawnIsSynchronousFixture()) @mock.patch.object(manager.ComputeManager, '_get_power_state') @mock.patch.object(manager.ComputeManager, '_sync_instance_power_state') @mock.patch.object(objects.Instance, 'get_by_uuid') def _test_handle_lifecycle_event(self, mock_get, mock_sync, mock_get_power_state, transition, event_pwr_state, current_pwr_state): event = mock.Mock() event.get_instance_uuid.return_value = mock.sentinel.uuid event.get_transition.return_value = transition mock_get_power_state.return_value = current_pwr_state self.compute.handle_lifecycle_event(event) mock_get.assert_called_with(mock.ANY, mock.sentinel.uuid, expected_attrs=[]) if event_pwr_state == current_pwr_state: mock_sync.assert_called_with(mock.ANY, mock_get.return_value, event_pwr_state) else: self.assertFalse(mock_sync.called) def test_handle_lifecycle_event(self): event_map = {virtevent.EVENT_LIFECYCLE_STOPPED: power_state.SHUTDOWN, virtevent.EVENT_LIFECYCLE_STARTED: 
power_state.RUNNING, virtevent.EVENT_LIFECYCLE_PAUSED: power_state.PAUSED, virtevent.EVENT_LIFECYCLE_RESUMED: power_state.RUNNING, virtevent.EVENT_LIFECYCLE_SUSPENDED: power_state.SUSPENDED, } for transition, pwr_state in six.iteritems(event_map): self._test_handle_lifecycle_event(transition=transition, event_pwr_state=pwr_state, current_pwr_state=pwr_state) def test_handle_lifecycle_event_state_mismatch(self): self._test_handle_lifecycle_event( transition=virtevent.EVENT_LIFECYCLE_STOPPED, event_pwr_state=power_state.SHUTDOWN, current_pwr_state=power_state.RUNNING) def test_delete_instance_info_cache_delete_ordering(self): call_tracker = mock.Mock() call_tracker.clear_events_for_instance.return_value = None mgr_class = self.compute.__class__ orig_delete = mgr_class._delete_instance specd_compute = mock.create_autospec(mgr_class) # spec out everything except for the method we really want # to test, then use call_tracker to verify call sequence specd_compute._delete_instance = orig_delete mock_inst = mock.Mock() mock_inst.uuid = uuids.instance mock_inst.save = mock.Mock() mock_inst.destroy = mock.Mock() mock_inst.system_metadata = mock.Mock() def _mark_notify(*args, **kwargs): call_tracker._notify_about_instance_usage(*args, **kwargs) def _mark_shutdown(*args, **kwargs): call_tracker._shutdown_instance(*args, **kwargs) specd_compute.instance_events = call_tracker specd_compute._notify_about_instance_usage = _mark_notify specd_compute._shutdown_instance = _mark_shutdown mock_inst.info_cache = call_tracker specd_compute._delete_instance(specd_compute, self.context, mock_inst, mock.Mock(), mock.Mock()) methods_called = [n for n, a, k in call_tracker.mock_calls] self.assertEqual(['clear_events_for_instance', '_notify_about_instance_usage', '_shutdown_instance', 'delete'], methods_called) @mock.patch.object(manager.ComputeManager, '_get_resource_tracker') @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes') @mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db') def test_update_available_resource(self, get_db_nodes, get_avail_nodes, get_rt): info = {'cn_id': 1} def _make_compute_node(hyp_hostname): cn = mock.Mock(spec_set=['hypervisor_hostname', 'id', 'destroy']) cn.id = info['cn_id'] info['cn_id'] += 1 cn.hypervisor_hostname = hyp_hostname return cn def _make_rt(node): n = mock.Mock(spec_set=['update_available_resource', 'nodename']) n.nodename = node return n ctxt = mock.Mock() db_nodes = [_make_compute_node('node1'), _make_compute_node('node2'), _make_compute_node('node3'), _make_compute_node('node4')] avail_nodes = set(['node2', 'node3', 'node4', 'node5']) avail_nodes_l = list(avail_nodes) rts = [_make_rt(node) for node in avail_nodes_l] # Make the 2nd and 3rd ones raise exc = exception.ComputeHostNotFound(host='fake') rts[1].update_available_resource.side_effect = exc exc = test.TestingException() rts[2].update_available_resource.side_effect = exc rts_iter = iter(rts) def _get_rt_side_effect(*args, **kwargs): return next(rts_iter) expected_rt_dict = {avail_nodes_l[0]: rts[0], avail_nodes_l[2]: rts[2], avail_nodes_l[3]: rts[3]} get_db_nodes.return_value = db_nodes get_avail_nodes.return_value = avail_nodes get_rt.side_effect = _get_rt_side_effect self.compute.update_available_resource(ctxt) get_db_nodes.assert_called_once_with(ctxt, use_slave=True) self.assertEqual(sorted([mock.call(node) for node in avail_nodes]), sorted(get_rt.call_args_list)) for rt in rts: rt.update_available_resource.assert_called_once_with(ctxt) self.assertEqual(expected_rt_dict, 
self.compute._resource_tracker_dict) # First node in set should have been removed from DB for db_node in db_nodes: if db_node.hypervisor_hostname == 'node1': db_node.destroy.assert_called_once_with() else: self.assertFalse(db_node.destroy.called) def test_delete_instance_without_info_cache(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ERROR, host=self.compute.host, expected_attrs=['system_metadata']) quotas = mock.create_autospec(objects.Quotas, spec_set=True) with test.nested( mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_shutdown_instance'), mock.patch.object(instance, 'obj_load_attr'), mock.patch.object(instance, 'save'), mock.patch.object(instance, 'destroy') ) as ( compute_notify_about_instance_usage, comupte_shutdown_instance, instance_obj_load_attr, instance_save, instance_destroy ): instance.info_cache = None self.compute._delete_instance(self.context, instance, [], quotas) @mock.patch.object(network_api.API, 'allocate_for_instance') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(time, 'sleep') def test_allocate_network_succeeds_after_retries( self, mock_sleep, mock_save, mock_allocate_for_instance): self.flags(network_allocate_retries=8) instance = fake_instance.fake_instance_obj( self.context, expected_attrs=['system_metadata']) is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' final_result = 'meow' dhcp_options = None mock_allocate_for_instance.side_effect = [ test.TestingException()] * 7 + [final_result] expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30] res = self.compute._allocate_network_async(self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) mock_sleep.has_calls(expected_sleep_times) self.assertEqual(final_result, res) # Ensure save is not called in while allocating networks, the instance # is saved after the allocation. self.assertFalse(mock_save.called) self.assertEqual('True', instance.system_metadata['network_allocated']) def test_allocate_network_fails(self): self.flags(network_allocate_retries=0) nwapi = self.compute.network_api self.mox.StubOutWithMock(nwapi, 'allocate_for_instance') instance = {} is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' dhcp_options = None nwapi.allocate_for_instance( self.context, instance, vpn=is_vpn, requested_networks=req_networks, macs=macs, security_groups=sec_groups, dhcp_options=dhcp_options, bind_host_id=instance.get('host')).AndRaise( test.TestingException()) self.mox.ReplayAll() self.assertRaises(test.TestingException, self.compute._allocate_network_async, self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) def test_allocate_network_neg_conf_value_treated_as_zero(self): self.flags(network_allocate_retries=-1) nwapi = self.compute.network_api self.mox.StubOutWithMock(nwapi, 'allocate_for_instance') instance = {} is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' dhcp_options = None # Only attempted once. 
nwapi.allocate_for_instance( self.context, instance, vpn=is_vpn, requested_networks=req_networks, macs=macs, security_groups=sec_groups, dhcp_options=dhcp_options, bind_host_id=instance.get('host')).AndRaise( test.TestingException()) self.mox.ReplayAll() self.assertRaises(test.TestingException, self.compute._allocate_network_async, self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) @mock.patch.object(network_api.API, 'allocate_for_instance') @mock.patch.object(manager.ComputeManager, '_instance_update') @mock.patch.object(time, 'sleep') def test_allocate_network_with_conf_value_is_one( self, sleep, _instance_update, allocate_for_instance): self.flags(network_allocate_retries=1) instance = fake_instance.fake_instance_obj( self.context, expected_attrs=['system_metadata']) is_vpn = 'fake-is-vpn' req_networks = 'fake-req-networks' macs = 'fake-macs' sec_groups = 'fake-sec-groups' dhcp_options = None final_result = 'zhangtralon' allocate_for_instance.side_effect = [test.TestingException(), final_result] res = self.compute._allocate_network_async(self.context, instance, req_networks, macs, sec_groups, is_vpn, dhcp_options) self.assertEqual(final_result, res) self.assertEqual(1, sleep.call_count) @mock.patch('nova.compute.manager.ComputeManager.' '_do_build_and_run_instance') def _test_max_concurrent_builds(self, mock_dbari): with mock.patch.object(self.compute, '_build_semaphore') as mock_sem: instance = objects.Instance(uuid=str(uuid.uuid4())) for i in (1, 2, 3): self.compute.build_and_run_instance(self.context, instance, mock.sentinel.image, mock.sentinel.request_spec, {}) self.assertEqual(3, mock_sem.__enter__.call_count) def test_max_concurrent_builds_limited(self): self.flags(max_concurrent_builds=2) self._test_max_concurrent_builds() def test_max_concurrent_builds_unlimited(self): self.flags(max_concurrent_builds=0) self._test_max_concurrent_builds() def test_max_concurrent_builds_semaphore_limited(self): self.flags(max_concurrent_builds=123) self.assertEqual(123, manager.ComputeManager()._build_semaphore.balance) def test_max_concurrent_builds_semaphore_unlimited(self): self.flags(max_concurrent_builds=0) compute = manager.ComputeManager() self.assertEqual(0, compute._build_semaphore.balance) self.assertIsInstance(compute._build_semaphore, compute_utils.UnlimitedSemaphore) def test_nil_out_inst_obj_host_and_node_sets_nil(self): instance = fake_instance.fake_instance_obj(self.context, uuid=uuids.instance, host='foo-host', node='foo-node') self.assertIsNotNone(instance.host) self.assertIsNotNone(instance.node) self.compute._nil_out_instance_obj_host_and_node(instance) self.assertIsNone(instance.host) self.assertIsNone(instance.node) def test_init_host(self): our_host = self.compute.host inst = fake_instance.fake_db_instance( vm_state=vm_states.ACTIVE, info_cache=dict(test_instance_info_cache.fake_info_cache, network_info=None), security_groups=None) startup_instances = [inst, inst, inst] def _do_mock_calls(defer_iptables_apply): self.compute.driver.init_host(host=our_host) context.get_admin_context().AndReturn(self.context) db.instance_get_all_by_host( self.context, our_host, columns_to_join=['info_cache', 'metadata'] ).AndReturn(startup_instances) if defer_iptables_apply: self.compute.driver.filter_defer_apply_on() self.compute._destroy_evacuated_instances(self.context) self.compute._init_instance(self.context, mox.IsA(objects.Instance)) self.compute._init_instance(self.context, mox.IsA(objects.Instance)) self.compute._init_instance(self.context, 
mox.IsA(objects.Instance)) if defer_iptables_apply: self.compute.driver.filter_defer_apply_off() self.mox.StubOutWithMock(self.compute.driver, 'init_host') self.mox.StubOutWithMock(self.compute.driver, 'filter_defer_apply_on') self.mox.StubOutWithMock(self.compute.driver, 'filter_defer_apply_off') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(self.compute, '_destroy_evacuated_instances') self.mox.StubOutWithMock(self.compute, '_init_instance') # Test with defer_iptables_apply self.flags(defer_iptables_apply=True) _do_mock_calls(True) self.mox.ReplayAll() self.compute.init_host() self.mox.VerifyAll() # Test without defer_iptables_apply self.mox.ResetAll() self.flags(defer_iptables_apply=False) _do_mock_calls(False) self.mox.ReplayAll() self.compute.init_host() # tearDown() uses context.get_admin_context(), so we have # to do the verification here and unstub it. self.mox.VerifyAll() self.mox.UnsetStubs() @mock.patch('nova.objects.InstanceList') @mock.patch('nova.objects.MigrationList.get_by_filters') def test_cleanup_host(self, mock_miglist_get, mock_instance_list): # just testing whether the cleanup_host method # when fired will invoke the underlying driver's # equivalent method. mock_miglist_get.return_value = [] mock_instance_list.get_by_host.return_value = [] with mock.patch.object(self.compute, 'driver') as mock_driver: self.compute.init_host() mock_driver.init_host.assert_called_once_with(host='fake-mini') self.compute.cleanup_host() # register_event_listener is called on startup (init_host) and # in cleanup_host mock_driver.register_event_listener.assert_has_calls([ mock.call(self.compute.handle_events), mock.call(None)]) mock_driver.cleanup_host.assert_called_once_with(host='fake-mini') def test_init_virt_events_disabled(self): self.flags(handle_virt_lifecycle_events=False, group='workarounds') with mock.patch.object(self.compute.driver, 'register_event_listener') as mock_register: self.compute.init_virt_events() self.assertFalse(mock_register.called) @mock.patch('nova.objects.MigrationList.get_by_filters') @mock.patch('nova.objects.Migration.save') def test_init_host_with_evacuated_instance(self, mock_save, mock_mig_get): our_host = self.compute.host not_our_host = 'not-' + our_host deleted_instance = fake_instance.fake_instance_obj( self.context, host=not_our_host, uuid=uuids.deleted_instance) migration = objects.Migration(instance_uuid=deleted_instance.uuid) mock_mig_get.return_value = [migration] self.mox.StubOutWithMock(self.compute.driver, 'init_host') self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(self.compute, 'init_virt_events') self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver') self.mox.StubOutWithMock(self.compute, '_init_instance') self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.compute.driver.init_host(host=our_host) context.get_admin_context().AndReturn(self.context) db.instance_get_all_by_host(self.context, our_host, columns_to_join=['info_cache', 'metadata'] ).AndReturn([]) self.compute.init_virt_events() # simulate failed instance self.compute._get_instances_on_driver( self.context, {'deleted': False}).AndReturn([deleted_instance]) self.compute.network_api.get_instance_nw_info( self.context, deleted_instance).AndRaise( exception.InstanceNotFound(instance_id=deleted_instance['uuid'])) # 
ensure driver.destroy is called so that driver may # clean up any dangling files self.compute.driver.destroy(self.context, deleted_instance, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.compute.init_host() # tearDown() uses context.get_admin_context(), so we have # to do the verification here and unstub it. self.mox.VerifyAll() self.mox.UnsetStubs() def test_init_instance_with_binding_failed_vif_type(self): # this instance will plug a 'binding_failed' vif instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, info_cache=None, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, task_state=None, host=self.compute.host, expected_attrs=['info_cache']) with test.nested( mock.patch.object(context, 'get_admin_context', return_value=self.context), mock.patch.object(compute_utils, 'get_nw_info_for_instance', return_value=network_model.NetworkInfo()), mock.patch.object(self.compute.driver, 'plug_vifs', side_effect=exception.VirtualInterfacePlugException( "Unexpected vif_type=binding_failed")), mock.patch.object(self.compute, '_set_instance_obj_error_state') ) as (get_admin_context, get_nw_info, plug_vifs, set_error_state): self.compute._init_instance(self.context, instance) set_error_state.assert_called_once_with(self.context, instance) def test__get_power_state_InstanceNotFound(self): instance = fake_instance.fake_instance_obj( self.context, power_state=power_state.RUNNING) with mock.patch.object(self.compute.driver, 'get_info', side_effect=exception.InstanceNotFound(instance_id=1)): self.assertEqual(self.compute._get_power_state(self.context, instance), power_state.NOSTATE) def test__get_power_state_NotFound(self): instance = fake_instance.fake_instance_obj( self.context, power_state=power_state.RUNNING) with mock.patch.object(self.compute.driver, 'get_info', side_effect=exception.NotFound()): self.assertRaises(exception.NotFound, self.compute._get_power_state, self.context, instance) def test_init_instance_failed_resume_sets_error(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, info_cache=None, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, task_state=None, host=self.compute.host, expected_attrs=['info_cache']) self.flags(resume_guests_state_on_host_boot=True) self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs') self.mox.StubOutWithMock(self.compute.driver, 'resume_state_on_host_boot') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.compute._get_power_state(mox.IgnoreArg(), instance).AndReturn(power_state.SHUTDOWN) self.compute._get_power_state(mox.IgnoreArg(), instance).AndReturn(power_state.SHUTDOWN) self.compute._get_power_state(mox.IgnoreArg(), instance).AndReturn(power_state.SHUTDOWN) self.compute.driver.plug_vifs(instance, mox.IgnoreArg()) self.compute._get_instance_block_device_info(mox.IgnoreArg(), instance).AndReturn('fake-bdm') self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(), instance, mox.IgnoreArg(), 'fake-bdm').AndRaise(test.TestingException) self.compute._set_instance_obj_error_state(mox.IgnoreArg(), instance) self.mox.ReplayAll() self.compute._init_instance('fake-context', instance) @mock.patch.object(objects.BlockDeviceMapping, 'destroy') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(objects.Instance, 'destroy') 
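# The quota reserve/commit mocks below let the partial-deletion path account for the freed instance, cores and ram without touching the database.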
@mock.patch.object(objects.Instance, 'obj_load_attr') @mock.patch.object(objects.quotas.Quotas, 'commit') @mock.patch.object(objects.quotas.Quotas, 'reserve') @mock.patch.object(objects.quotas, 'ids_from_instance') def test_init_instance_complete_partial_deletion( self, mock_ids_from_instance, mock_reserve, mock_commit, mock_inst_destroy, mock_obj_load_attr, mock_get_by_instance_uuid, mock_bdm_destroy): """Test to complete deletion for instances in DELETED status but not marked as deleted in the DB """ instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, power_state=power_state.SHUTDOWN, vm_state=vm_states.DELETED, host=self.compute.host, task_state=None, deleted=False, deleted_at=None, metadata={}, system_metadata={}, expected_attrs=['metadata', 'system_metadata']) # Make sure instance vm_state is marked as 'DELETED' but instance is # not destroyed from db. self.assertEqual(vm_states.DELETED, instance.vm_state) self.assertFalse(instance.deleted) deltas = {'instances': -1, 'cores': -instance.vcpus, 'ram': -instance.memory_mb} def fake_inst_destroy(): instance.deleted = True instance.deleted_at = timeutils.utcnow() mock_ids_from_instance.return_value = (instance.project_id, instance.user_id) mock_inst_destroy.side_effect = fake_inst_destroy() self.compute._init_instance(self.context, instance) # Make sure that instance.destroy method was called and # instance was deleted from db. self.assertTrue(mock_reserve.called) self.assertTrue(mock_commit.called) self.assertNotEqual(0, instance.deleted) mock_reserve.assert_called_once_with(project_id=instance.project_id, user_id=instance.user_id, **deltas) @mock.patch('nova.compute.manager.LOG') def test_init_instance_complete_partial_deletion_raises_exception( self, mock_log): instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, power_state=power_state.SHUTDOWN, vm_state=vm_states.DELETED, host=self.compute.host, task_state=None, deleted=False, deleted_at=None, metadata={}, system_metadata={}, expected_attrs=['metadata', 'system_metadata']) with mock.patch.object(self.compute, '_complete_partial_deletion') as mock_deletion: mock_deletion.side_effect = test.TestingException() self.compute._init_instance(self, instance) msg = u'Failed to complete a deletion' mock_log.exception.assert_called_once_with(msg, instance=instance) def test_init_instance_stuck_in_deleting(self): instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, host=self.compute.host, task_state=task_states.DELETING) self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(self.compute, '_delete_instance') self.mox.StubOutWithMock(instance, 'obj_load_attr') self.mox.StubOutWithMock(self.compute, '_create_reservations') bdms = [] quotas = objects.quotas.Quotas(self.context) instance.obj_load_attr('metadata') instance.obj_load_attr('system_metadata') objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance.uuid).AndReturn(bdms) self.compute._create_reservations(self.context, instance, instance.project_id, instance.user_id).AndReturn(quotas) self.compute._delete_instance(self.context, instance, bdms, mox.IgnoreArg()) self.mox.ReplayAll() self.compute._init_instance(self.context, instance) @mock.patch.object(objects.Instance, 'get_by_uuid') 
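# If _delete_instance fails while completing a stuck DELETING instance, _init_instance should swallow the error and put the instance into ERROR state.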
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def test_init_instance_stuck_in_deleting_raises_exception( self, mock_get_by_instance_uuid, mock_get_by_uuid): instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, metadata={}, system_metadata={}, host=self.compute.host, vm_state=vm_states.ACTIVE, task_state=task_states.DELETING, expected_attrs=['metadata', 'system_metadata']) bdms = [] reservations = ['fake-resv'] def _create_patch(name, attr): patcher = mock.patch.object(name, attr) mocked_obj = patcher.start() self.addCleanup(patcher.stop) return mocked_obj mock_delete_instance = _create_patch(self.compute, '_delete_instance') mock_set_instance_error_state = _create_patch( self.compute, '_set_instance_obj_error_state') mock_create_reservations = _create_patch(self.compute, '_create_reservations') mock_create_reservations.return_value = reservations mock_get_by_instance_uuid.return_value = bdms mock_get_by_uuid.return_value = instance mock_delete_instance.side_effect = test.TestingException('test') self.compute._init_instance(self.context, instance) mock_set_instance_error_state.assert_called_once_with( self.context, instance) def _test_init_instance_reverts_crashed_migrations(self, old_vm_state=None): power_on = True if (not old_vm_state or old_vm_state == vm_states.ACTIVE) else False sys_meta = { 'old_vm_state': old_vm_state } instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ERROR, task_state=task_states.RESIZE_MIGRATING, power_state=power_state.SHUTDOWN, system_metadata=sys_meta, host=self.compute.host, expected_attrs=['system_metadata']) self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance') self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs') self.mox.StubOutWithMock(self.compute.driver, 'finish_revert_migration') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.driver, 'get_info') self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute, '_retry_reboot') self.compute._retry_reboot(self.context, instance).AndReturn( (False, None)) compute_utils.get_nw_info_for_instance(instance).AndReturn( network_model.NetworkInfo()) self.compute.driver.plug_vifs(instance, []) self.compute._get_instance_block_device_info( self.context, instance).AndReturn([]) self.compute.driver.finish_revert_migration(self.context, instance, [], [], power_on) instance.save() self.compute.driver.get_info(instance).AndReturn( hardware.InstanceInfo(state=power_state.SHUTDOWN)) self.compute.driver.get_info(instance).AndReturn( hardware.InstanceInfo(state=power_state.SHUTDOWN)) self.mox.ReplayAll() self.compute._init_instance(self.context, instance) self.assertIsNone(instance.task_state) def test_init_instance_reverts_crashed_migration_from_active(self): self._test_init_instance_reverts_crashed_migrations( old_vm_state=vm_states.ACTIVE) def test_init_instance_reverts_crashed_migration_from_stopped(self): self._test_init_instance_reverts_crashed_migrations( old_vm_state=vm_states.STOPPED) def test_init_instance_reverts_crashed_migration_no_old_state(self): self._test_init_instance_reverts_crashed_migrations(old_vm_state=None) def test_init_instance_resets_crashed_live_migration(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ACTIVE, host=self.compute.host, task_state=task_states.MIGRATING) with test.nested( 
mock.patch.object(instance, 'save'), mock.patch('nova.compute.utils.get_nw_info_for_instance', return_value=network_model.NetworkInfo()) ) as (save, get_nw_info): self.compute._init_instance(self.context, instance) save.assert_called_once_with(expected_task_state=['migrating']) get_nw_info.assert_called_once_with(instance) self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) def _test_init_instance_sets_building_error(self, vm_state, task_state=None): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_state, host=self.compute.host, task_state=task_state) with mock.patch.object(instance, 'save') as save: self.compute._init_instance(self.context, instance) save.assert_called_once_with() self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_init_instance_sets_building_error(self): self._test_init_instance_sets_building_error(vm_states.BUILDING) def test_init_instance_sets_rebuilding_errors(self): tasks = [task_states.REBUILDING, task_states.REBUILD_BLOCK_DEVICE_MAPPING, task_states.REBUILD_SPAWNING] vms = [vm_states.ACTIVE, vm_states.STOPPED] for vm_state in vms: for task_state in tasks: self._test_init_instance_sets_building_error( vm_state, task_state) def _test_init_instance_sets_building_tasks_error(self, instance): instance.host = self.compute.host with mock.patch.object(instance, 'save') as save: self.compute._init_instance(self.context, instance) save.assert_called_once_with() self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ERROR, instance.vm_state) def test_init_instance_sets_building_tasks_error_scheduling(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=None, task_state=task_states.SCHEDULING) self._test_init_instance_sets_building_tasks_error(instance) def test_init_instance_sets_building_tasks_error_block_device(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = None instance.task_state = task_states.BLOCK_DEVICE_MAPPING self._test_init_instance_sets_building_tasks_error(instance) def test_init_instance_sets_building_tasks_error_networking(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = None instance.task_state = task_states.NETWORKING self._test_init_instance_sets_building_tasks_error(instance) def test_init_instance_sets_building_tasks_error_spawning(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = None instance.task_state = task_states.SPAWNING self._test_init_instance_sets_building_tasks_error(instance) def _test_init_instance_cleans_image_states(self, instance): with mock.patch.object(instance, 'save') as save: self.compute._get_power_state = mock.Mock() self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock() instance.info_cache = None instance.power_state = power_state.RUNNING instance.host = self.compute.host self.compute._init_instance(self.context, instance) save.assert_called_once_with() self.compute.driver.post_interrupted_snapshot_cleanup.\ assert_called_once_with(self.context, instance) self.assertIsNone(instance.task_state) @mock.patch('nova.compute.manager.ComputeManager._get_power_state', return_value=power_state.RUNNING) @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def _test_init_instance_cleans_task_states(self, powerstate, state, mock_get_uuid, mock_get_power_state): instance = 
objects.Instance(self.context) instance.uuid = uuids.instance instance.info_cache = None instance.power_state = power_state.RUNNING instance.vm_state = vm_states.ACTIVE instance.task_state = state instance.host = self.compute.host mock_get_power_state.return_value = powerstate self.compute._init_instance(self.context, instance) return instance def test_init_instance_cleans_image_state_pending_upload(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_PENDING_UPLOAD self._test_init_instance_cleans_image_states(instance) def test_init_instance_cleans_image_state_uploading(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_UPLOADING self._test_init_instance_cleans_image_states(instance) def test_init_instance_cleans_image_state_snapshot(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_SNAPSHOT self._test_init_instance_cleans_image_states(instance) def test_init_instance_cleans_image_state_snapshot_pending(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING self._test_init_instance_cleans_image_states(instance) @mock.patch.object(objects.Instance, 'save') def test_init_instance_cleans_running_pausing(self, mock_save): instance = self._test_init_instance_cleans_task_states( power_state.RUNNING, task_states.PAUSING) mock_save.assert_called_once_with() self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) @mock.patch.object(objects.Instance, 'save') def test_init_instance_cleans_running_unpausing(self, mock_save): instance = self._test_init_instance_cleans_task_states( power_state.RUNNING, task_states.UNPAUSING) mock_save.assert_called_once_with() self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) @mock.patch('nova.compute.manager.ComputeManager.unpause_instance') def test_init_instance_cleans_paused_unpausing(self, mock_unpause): def fake_unpause(context, instance): instance.task_state = None mock_unpause.side_effect = fake_unpause instance = self._test_init_instance_cleans_task_states( power_state.PAUSED, task_states.UNPAUSING) mock_unpause.assert_called_once_with(self.context, instance) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) def test_init_instance_errors_when_not_migrating(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ERROR instance.task_state = task_states.IMAGE_UPLOADING instance.host = self.compute.host self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance') self.mox.ReplayAll() self.compute._init_instance(self.context, instance) self.mox.VerifyAll() def test_init_instance_deletes_error_deleting_instance(self): instance = fake_instance.fake_instance_obj( self.context, project_id='fake', uuid=uuids.instance, vcpus=1, memory_mb=64, vm_state=vm_states.ERROR, host=self.compute.host, task_state=task_states.DELETING) self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') self.mox.StubOutWithMock(self.compute, '_delete_instance') self.mox.StubOutWithMock(instance, 'obj_load_attr') self.mox.StubOutWithMock(objects.quotas, 
'ids_from_instance') self.mox.StubOutWithMock(self.compute, '_create_reservations') bdms = [] quotas = objects.quotas.Quotas(self.context) instance.obj_load_attr('metadata') instance.obj_load_attr('system_metadata') objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, instance.uuid).AndReturn(bdms) objects.quotas.ids_from_instance(self.context, instance).AndReturn( (instance.project_id, instance.user_id)) self.compute._create_reservations(self.context, instance, instance.project_id, instance.user_id).AndReturn(quotas) self.compute._delete_instance(self.context, instance, bdms, mox.IgnoreArg()) self.mox.ReplayAll() self.compute._init_instance(self.context, instance) self.mox.VerifyAll() def test_init_instance_resize_prep(self): instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ACTIVE, host=self.compute.host, task_state=task_states.RESIZE_PREP, power_state=power_state.RUNNING) with test.nested( mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING), mock.patch.object(compute_utils, 'get_nw_info_for_instance'), mock.patch.object(instance, 'save', autospec=True) ) as (mock_get_power_state, mock_nw_info, mock_instance_save): self.compute._init_instance(self.context, instance) mock_instance_save.assert_called_once_with() self.assertIsNone(instance.task_state) @mock.patch('nova.context.RequestContext.elevated') @mock.patch('nova.compute.utils.get_nw_info_for_instance') @mock.patch( 'nova.compute.manager.ComputeManager._get_instance_block_device_info') @mock.patch('nova.virt.driver.ComputeDriver.destroy') @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector') def _test_shutdown_instance_exception(self, exc, mock_connector, mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated): mock_connector.side_effect = exc mock_elevated.return_value = self.context instance = fake_instance.fake_instance_obj( self.context, uuid=uuids.instance, vm_state=vm_states.ERROR, task_state=task_states.DELETING) bdms = [mock.Mock(id=1, is_volume=True)] self.compute._shutdown_instance(self.context, instance, bdms, notify=False, try_deallocate_networks=False) def test_shutdown_instance_endpoint_not_found(self): exc = cinder_exception.EndpointNotFound self._test_shutdown_instance_exception(exc) def test_shutdown_instance_client_exception(self): exc = cinder_exception.ClientException(code=9001) self._test_shutdown_instance_exception(exc) def test_shutdown_instance_volume_not_found(self): exc = exception.VolumeNotFound(volume_id=42) self._test_shutdown_instance_exception(exc) def test_shutdown_instance_disk_not_found(self): exc = exception.DiskNotFound(location="not\\here") self._test_shutdown_instance_exception(exc) def test_shutdown_instance_other_exception(self): exc = Exception('some other exception') self._test_shutdown_instance_exception(exc) def _test_init_instance_retries_reboot(self, instance, reboot_type, return_power_state): instance.host = self.compute.host with test.nested( mock.patch.object(self.compute, '_get_power_state', return_value=return_power_state), mock.patch.object(self.compute, 'reboot_instance'), mock.patch.object(compute_utils, 'get_nw_info_for_instance') ) as ( _get_power_state, reboot_instance, get_nw_info_for_instance ): self.compute._init_instance(self.context, instance) call = mock.call(self.context, instance, block_device_info=None, reboot_type=reboot_type) reboot_instance.assert_has_calls([call]) def test_init_instance_retries_reboot_pending(self): instance = 
objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_PENDING for state in vm_states.ALLOW_SOFT_REBOOT: instance.vm_state = state self._test_init_instance_retries_reboot(instance, 'SOFT', power_state.RUNNING) def test_init_instance_retries_reboot_pending_hard(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_PENDING_HARD for state in vm_states.ALLOW_HARD_REBOOT: # NOTE(dave-mcnally) while a reboot of a vm in error state is # possible we don't attempt to recover an error during init if state == vm_states.ERROR: continue instance.vm_state = state self._test_init_instance_retries_reboot(instance, 'HARD', power_state.RUNNING) def test_init_instance_retries_reboot_pending_soft_became_hard(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_PENDING for state in vm_states.ALLOW_HARD_REBOOT: # NOTE(dave-mcnally) while a reboot of a vm in error state is # possible we don't attempt to recover an error during init if state == vm_states.ERROR: continue instance.vm_state = state with mock.patch.object(instance, 'save'): self._test_init_instance_retries_reboot(instance, 'HARD', power_state.SHUTDOWN) self.assertEqual(task_states.REBOOT_PENDING_HARD, instance.task_state) def test_init_instance_retries_reboot_started(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.REBOOT_STARTED with mock.patch.object(instance, 'save'): self._test_init_instance_retries_reboot(instance, 'HARD', power_state.NOSTATE) def test_init_instance_retries_reboot_started_hard(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.REBOOT_STARTED_HARD self._test_init_instance_retries_reboot(instance, 'HARD', power_state.NOSTATE) def _test_init_instance_cleans_reboot_state(self, instance): instance.host = self.compute.host with test.nested( mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING), mock.patch.object(instance, 'save', autospec=True), mock.patch.object(compute_utils, 'get_nw_info_for_instance') ) as ( _get_power_state, instance_save, get_nw_info_for_instance ): self.compute._init_instance(self.context, instance) instance_save.assert_called_once_with() self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) def test_init_instance_cleans_image_state_reboot_started(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.REBOOT_STARTED instance.power_state = power_state.RUNNING self._test_init_instance_cleans_reboot_state(instance) def test_init_instance_cleans_image_state_reboot_started_hard(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.REBOOT_STARTED_HARD instance.power_state = power_state.RUNNING self._test_init_instance_cleans_reboot_state(instance) def test_init_instance_retries_power_off(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.id = 1 instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.POWERING_OFF instance.host = self.compute.host with mock.patch.object(self.compute, 'stop_instance'): 
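# An instance left in POWERING_OFF at service startup is recovered by re-issuing stop_instance.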
self.compute._init_instance(self.context, instance) call = mock.call(self.context, instance, True) self.compute.stop_instance.assert_has_calls([call]) def test_init_instance_retries_power_on(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.id = 1 instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.POWERING_ON instance.host = self.compute.host with mock.patch.object(self.compute, 'start_instance'): self.compute._init_instance(self.context, instance) call = mock.call(self.context, instance) self.compute.start_instance.assert_has_calls([call]) def test_init_instance_retries_power_on_silent_exception(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.id = 1 instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.POWERING_ON instance.host = self.compute.host with mock.patch.object(self.compute, 'start_instance', return_value=Exception): init_return = self.compute._init_instance(self.context, instance) call = mock.call(self.context, instance) self.compute.start_instance.assert_has_calls([call]) self.assertIsNone(init_return) def test_init_instance_retries_power_off_silent_exception(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.id = 1 instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.POWERING_OFF instance.host = self.compute.host with mock.patch.object(self.compute, 'stop_instance', return_value=Exception): init_return = self.compute._init_instance(self.context, instance) call = mock.call(self.context, instance, True) self.compute.stop_instance.assert_has_calls([call]) self.assertIsNone(init_return) def test_get_instances_on_driver(self): driver_instances = [] for x in range(10): driver_instances.append(fake_instance.fake_db_instance()) self.mox.StubOutWithMock(self.compute.driver, 'list_instance_uuids') self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') self.compute.driver.list_instance_uuids().AndReturn( [inst['uuid'] for inst in driver_instances]) db.instance_get_all_by_filters( self.context, {'uuid': [inst['uuid'] for inst in driver_instances]}, 'created_at', 'desc', columns_to_join=None, limit=None, marker=None).AndReturn(driver_instances) self.mox.ReplayAll() result = self.compute._get_instances_on_driver(self.context) self.assertEqual([x['uuid'] for x in driver_instances], [x['uuid'] for x in result]) @mock.patch('nova.virt.driver.ComputeDriver.list_instance_uuids') @mock.patch('nova.db.api.instance_get_all_by_filters') def test_get_instances_on_driver_empty(self, mock_list, mock_db): mock_list.return_value = [] result = self.compute._get_instances_on_driver(self.context) # instance_get_all_by_filters should not be called self.assertEqual(0, mock_db.call_count) self.assertEqual([], [x['uuid'] for x in result]) def test_get_instances_on_driver_fallback(self): # Test getting instances when driver doesn't support # 'list_instance_uuids' self.compute.host = 'host' filters = {'host': self.compute.host} self.flags(instance_name_template='inst-%i') all_instances = [] driver_instances = [] for x in range(10): instance = fake_instance.fake_db_instance(name='inst-%i' % x, id=x) if x % 2: driver_instances.append(instance) all_instances.append(instance) self.mox.StubOutWithMock(self.compute.driver, 'list_instance_uuids') self.mox.StubOutWithMock(self.compute.driver, 'list_instances') self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') self.compute.driver.list_instance_uuids().AndRaise( NotImplementedError()) 
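# list_instance_uuids is unsupported by this driver, so the manager falls back to list_instances() and matches the returned names against the DB records.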
self.compute.driver.list_instances().AndReturn( [inst['name'] for inst in driver_instances]) db.instance_get_all_by_filters( self.context, filters, 'created_at', 'desc', columns_to_join=None, limit=None, marker=None).AndReturn(all_instances) self.mox.ReplayAll() result = self.compute._get_instances_on_driver(self.context, filters) self.assertEqual([x['uuid'] for x in driver_instances], [x['uuid'] for x in result]) def test_instance_usage_audit(self): instances = [objects.Instance(uuid=uuids.instance)] @classmethod def fake_task_log(*a, **k): pass @classmethod def fake_get(*a, **k): return instances self.flags(instance_usage_audit=True) self.stubs.Set(objects.TaskLog, 'get', fake_task_log) self.stubs.Set(objects.InstanceList, 'get_active_by_window_joined', fake_get) self.stubs.Set(objects.TaskLog, 'begin_task', fake_task_log) self.stubs.Set(objects.TaskLog, 'end_task', fake_task_log) self.mox.StubOutWithMock(compute_utils, 'notify_usage_exists') compute_utils.notify_usage_exists(self.compute.notifier, self.context, instances[0], ignore_missing_network_data=False) self.mox.ReplayAll() self.compute._instance_usage_audit(self.context) @mock.patch.object(objects.InstanceList, 'get_by_host') def test_sync_power_states(self, mock_get): instance = mock.Mock() mock_get.return_value = [instance] with mock.patch.object(self.compute._sync_power_pool, 'spawn_n') as mock_spawn: self.compute._sync_power_states(mock.sentinel.context) mock_get.assert_called_with(mock.sentinel.context, self.compute.host, expected_attrs=[], use_slave=True) mock_spawn.assert_called_once_with(mock.ANY, instance) def _get_sync_instance(self, power_state, vm_state, task_state=None, shutdown_terminate=False): instance = objects.Instance() instance.uuid = uuids.instance instance.power_state = power_state instance.vm_state = vm_state instance.host = self.compute.host instance.task_state = task_state instance.shutdown_terminate = shutdown_terminate self.mox.StubOutWithMock(instance, 'refresh') self.mox.StubOutWithMock(instance, 'save') return instance def test_sync_instance_power_state_match(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) instance.refresh(use_slave=False) self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, power_state.RUNNING) def test_sync_instance_power_state_running_stopped(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) instance.refresh(use_slave=False) instance.save() self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, power_state.SHUTDOWN) self.assertEqual(instance.power_state, power_state.SHUTDOWN) def _test_sync_to_stop(self, power_state, vm_state, driver_power_state, stop=True, force=False, shutdown_terminate=False): instance = self._get_sync_instance( power_state, vm_state, shutdown_terminate=shutdown_terminate) instance.refresh(use_slave=False) instance.save() self.mox.StubOutWithMock(self.compute.compute_api, 'stop') self.mox.StubOutWithMock(self.compute.compute_api, 'delete') self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop') if shutdown_terminate: self.compute.compute_api.delete(self.context, instance) elif stop: if force: self.compute.compute_api.force_stop(self.context, instance) else: self.compute.compute_api.stop(self.context, instance) self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, driver_power_state) self.mox.VerifyAll() self.mox.UnsetStubs() def test_sync_instance_power_state_to_stop(self): for ps in 
(power_state.SHUTDOWN, power_state.CRASHED, power_state.SUSPENDED): self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps) for ps in (power_state.SHUTDOWN, power_state.CRASHED): self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps, force=True) self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED, power_state.RUNNING, force=True) def test_sync_instance_power_state_to_terminate(self): self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, power_state.SHUTDOWN, force=False, shutdown_terminate=True) def test_sync_instance_power_state_to_no_stop(self): for ps in (power_state.PAUSED, power_state.NOSTATE): self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps, stop=False) for vs in (vm_states.SOFT_DELETED, vm_states.DELETED): for ps in (power_state.NOSTATE, power_state.SHUTDOWN): self._test_sync_to_stop(power_state.RUNNING, vs, ps, stop=False) @mock.patch('nova.compute.manager.ComputeManager.' '_sync_instance_power_state') def test_query_driver_power_state_and_sync_pending_task( self, mock_sync_power_state): with mock.patch.object(self.compute.driver, 'get_info') as mock_get_info: db_instance = objects.Instance(uuid=uuids.db_instance, task_state=task_states.POWERING_OFF) self.compute._query_driver_power_state_and_sync(self.context, db_instance) self.assertFalse(mock_get_info.called) self.assertFalse(mock_sync_power_state.called) @mock.patch('nova.compute.manager.ComputeManager.' '_sync_instance_power_state') def test_query_driver_power_state_and_sync_not_found_driver( self, mock_sync_power_state): error = exception.InstanceNotFound(instance_id=1) with mock.patch.object(self.compute.driver, 'get_info', side_effect=error) as mock_get_info: db_instance = objects.Instance(uuid=uuids.db_instance, task_state=None) self.compute._query_driver_power_state_and_sync(self.context, db_instance) mock_get_info.assert_called_once_with(db_instance) mock_sync_power_state.assert_called_once_with(self.context, db_instance, power_state.NOSTATE, use_slave=True) def test_run_pending_deletes(self): self.flags(instance_delete_interval=10) class FakeInstance(object): def __init__(self, uuid, name, smd): self.uuid = uuid self.name = name self.system_metadata = smd self.cleaned = False def __getitem__(self, name): return getattr(self, name) def save(self): pass a = FakeInstance('123', 'apple', {'clean_attempts': '100'}) b = FakeInstance('456', 'orange', {'clean_attempts': '3'}) c = FakeInstance('789', 'banana', {}) self.mox.StubOutWithMock(objects.InstanceList, 'get_by_filters') objects.InstanceList.get_by_filters( {'read_deleted': 'yes'}, {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini', 'cleaned': False}, expected_attrs=['info_cache', 'security_groups', 'system_metadata'], use_slave=True).AndReturn([a, b, c]) self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files') self.compute.driver.delete_instance_files( mox.IgnoreArg()).AndReturn(True) self.compute.driver.delete_instance_files( mox.IgnoreArg()).AndReturn(False) self.mox.ReplayAll() self.compute._run_pending_deletes({}) self.assertFalse(a.cleaned) self.assertEqual('100', a.system_metadata['clean_attempts']) self.assertTrue(b.cleaned) self.assertEqual('4', b.system_metadata['clean_attempts']) self.assertFalse(c.cleaned) self.assertEqual('1', c.system_metadata['clean_attempts']) @mock.patch.object(objects.Migration, 'obj_as_admin') @mock.patch.object(objects.Migration, 'save') @mock.patch.object(objects.MigrationList, 'get_by_filters') @mock.patch.object(objects.InstanceList, 
'get_by_filters') def _test_cleanup_incomplete_migrations(self, inst_host, mock_inst_get_by_filters, mock_migration_get_by_filters, mock_save, mock_obj_as_admin): def fake_inst(context, uuid, host): inst = objects.Instance(context) inst.uuid = uuid inst.host = host return inst def fake_migration(uuid, status, inst_uuid, src_host, dest_host): migration = objects.Migration() migration.uuid = uuid migration.status = status migration.instance_uuid = inst_uuid migration.source_compute = src_host migration.dest_compute = dest_host return migration fake_instances = [fake_inst(self.context, uuids.instance_1, inst_host), fake_inst(self.context, uuids.instance_2, inst_host)] fake_migrations = [fake_migration('123', 'error', uuids.instance_1, 'fake-host', 'fake-mini'), fake_migration('456', 'error', uuids.instance_2, 'fake-host', 'fake-mini')] mock_migration_get_by_filters.return_value = fake_migrations mock_inst_get_by_filters.return_value = fake_instances with mock.patch.object(self.compute.driver, 'delete_instance_files'): self.compute._cleanup_incomplete_migrations(self.context) # Ensure that migration status is set to 'failed' after instance # files deletion for those instances whose instance.host is not # same as compute host where periodic task is running. for inst in fake_instances: if inst.host != CONF.host: for mig in fake_migrations: if inst.uuid == mig.instance_uuid: self.assertEqual('failed', mig.status) def test_cleanup_incomplete_migrations_dest_node(self): """Test to ensure instance files are deleted from destination node. If instance gets deleted during resizing/revert-resizing operation, in that case instance files gets deleted from instance.host (source host here), but there is possibility that instance files could be present on destination node. This test ensures that `_cleanup_incomplete_migration` periodic task deletes orphaned instance files from destination compute node. """ self.flags(host='fake-mini') self._test_cleanup_incomplete_migrations('fake-host') def test_cleanup_incomplete_migrations_source_node(self): """Test to ensure instance files are deleted from source node. If instance gets deleted during resizing/revert-resizing operation, in that case instance files gets deleted from instance.host (dest host here), but there is possibility that instance files could be present on source node. This test ensures that `_cleanup_incomplete_migration` periodic task deletes orphaned instance files from source compute node. 
""" self.flags(host='fake-host') self._test_cleanup_incomplete_migrations('fake-mini') def test_attach_interface_failure(self): # Test that the fault methods are invoked when an attach fails db_instance = fake_instance.fake_db_instance() f_instance = objects.Instance._from_db_object(self.context, objects.Instance(), db_instance) e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(self.compute.network_api, 'allocate_port_for_instance', side_effect=e) @mock.patch.object(self.compute, '_instance_update', side_effect=lambda *a, **k: {}) def do_test(update, meth, add_fault): self.assertRaises(exception.InterfaceAttachFailed, self.compute.attach_interface, self.context, f_instance, 'net_id', 'port_id', None) add_fault.assert_has_calls([ mock.call(self.context, f_instance, e, mock.ANY)]) do_test() def test_detach_interface_failure(self): # Test that the fault methods are invoked when a detach fails # Build test data that will cause a PortNotFound exception f_instance = mock.MagicMock() f_instance.info_cache = mock.MagicMock() f_instance.info_cache.network_info = [] @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(self.compute, '_set_instance_obj_error_state') def do_test(meth, add_fault): self.assertRaises(exception.PortNotFound, self.compute.detach_interface, self.context, f_instance, 'port_id') add_fault.assert_has_calls( [mock.call(self.context, f_instance, mock.ANY, mock.ANY)]) do_test() def test_swap_volume_volume_api_usage(self): # This test ensures that volume_id arguments are passed to volume_api # and that volume states are OK volumes = {} old_volume_id = uuidutils.generate_uuid() volumes[old_volume_id] = {'id': old_volume_id, 'display_name': 'old_volume', 'status': 'detaching', 'size': 1} new_volume_id = uuidutils.generate_uuid() volumes[new_volume_id] = {'id': new_volume_id, 'display_name': 'new_volume', 'status': 'available', 'size': 2} def fake_vol_api_roll_detaching(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'detaching': volumes[volume_id]['status'] = 'in-use' fake_bdm = fake_block_device.FakeDbBlockDeviceDict( {'device_name': '/dev/vdb', 'source_type': 'volume', 'destination_type': 'volume', 'instance_uuid': uuids.instance, 'connection_info': '{"foo": "bar"}'}) def fake_vol_api_func(context, volume, *args): self.assertTrue(uuidutils.is_uuid_like(volume)) return {} def fake_vol_get(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) return volumes[volume_id] def fake_vol_unreserve(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'attaching': volumes[volume_id]['status'] = 'available' def fake_vol_migrate_volume_completion(context, old_volume_id, new_volume_id, error=False): self.assertTrue(uuidutils.is_uuid_like(old_volume_id)) self.assertTrue(uuidutils.is_uuid_like(new_volume_id)) volumes[old_volume_id]['status'] = 'in-use' return {'save_volume_id': new_volume_id} def fake_func_exc(*args, **kwargs): raise AttributeError # Random exception def fake_swap_volume(old_connection_info, new_connection_info, instance, mountpoint, resize_to): self.assertEqual(resize_to, 2) def fake_block_device_mapping_update(ctxt, id, updates, legacy): self.assertEqual(2, updates['volume_size']) return fake_bdm self.stubs.Set(self.compute.volume_api, 'roll_detaching', fake_vol_api_roll_detaching) self.stubs.Set(self.compute.volume_api, 
'get', fake_vol_get) self.stubs.Set(self.compute.volume_api, 'initialize_connection', fake_vol_api_func) self.stubs.Set(self.compute.volume_api, 'unreserve_volume', fake_vol_unreserve) self.stubs.Set(self.compute.volume_api, 'terminate_connection', fake_vol_api_func) self.stub_out('nova.db.' 'block_device_mapping_get_by_instance_and_volume_id', lambda x, y, z, v: fake_bdm) self.stubs.Set(self.compute.driver, 'get_volume_connector', lambda x: {}) self.stubs.Set(self.compute.driver, 'swap_volume', fake_swap_volume) self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion', fake_vol_migrate_volume_completion) self.stub_out('nova.db.block_device_mapping_update', fake_block_device_mapping_update) self.stub_out('nova.db.instance_fault_create', lambda x, y: test_instance_fault.fake_faults['fake-uuid'][0]) self.stubs.Set(self.compute, '_instance_update', lambda c, u, **k: {}) # Good path self.compute.swap_volume(self.context, old_volume_id, new_volume_id, fake_instance.fake_instance_obj( self.context, **{'uuid': uuids.instance})) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') # Error paths volumes[old_volume_id]['status'] = 'detaching' volumes[new_volume_id]['status'] = 'attaching' self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc) self.assertRaises(AttributeError, self.compute.swap_volume, self.context, old_volume_id, new_volume_id, fake_instance.fake_instance_obj( self.context, **{'uuid': uuids.instance})) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') volumes[old_volume_id]['status'] = 'detaching' volumes[new_volume_id]['status'] = 'attaching' self.stubs.Set(self.compute.volume_api, 'initialize_connection', fake_func_exc) self.assertRaises(AttributeError, self.compute.swap_volume, self.context, old_volume_id, new_volume_id, fake_instance.fake_instance_obj( self.context, **{'uuid': uuids.instance})) self.assertEqual(volumes[old_volume_id]['status'], 'in-use') self.assertEqual(volumes[new_volume_id]['status'], 'available') @mock.patch.object(compute_utils, 'EventReporter') def test_check_can_live_migrate_source(self, event_mock): is_volume_backed = 'volume_backed' dest_check_data = migrate_data_obj.LiveMigrateData() db_instance = fake_instance.fake_db_instance() instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.mox.StubOutWithMock(self.compute.compute_api, 'is_volume_backed_instance') self.mox.StubOutWithMock(self.compute, '_get_instance_block_device_info') self.mox.StubOutWithMock(self.compute.driver, 'check_can_live_migrate_source') self.compute.compute_api.is_volume_backed_instance( self.context, instance).AndReturn(is_volume_backed) self.compute._get_instance_block_device_info( self.context, instance, refresh_conn_info=True ).AndReturn({'block_device_mapping': 'fake'}) self.compute.driver.check_can_live_migrate_source( self.context, instance, dest_check_data, {'block_device_mapping': 'fake'}) self.mox.ReplayAll() self.compute.check_can_live_migrate_source( self.context, instance=instance, dest_check_data=dest_check_data) event_mock.assert_called_once_with( self.context, 'compute_check_can_live_migrate_source', instance.uuid) self.assertTrue(dest_check_data.is_volume_backed) @mock.patch.object(compute_utils, 'EventReporter') def _test_check_can_live_migrate_destination(self, event_mock, do_raise=False): db_instance = fake_instance.fake_db_instance(host='fake-host') instance = objects.Instance._from_db_object( self.context, 
objects.Instance(), db_instance) instance.host = 'fake-host' block_migration = 'block_migration' disk_over_commit = 'disk_over_commit' src_info = 'src_info' dest_info = 'dest_info' dest_check_data = dict(foo='bar') mig_data = dict(cow='moo') self.mox.StubOutWithMock(self.compute, '_get_compute_info') self.mox.StubOutWithMock(self.compute.driver, 'check_can_live_migrate_destination') self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'check_can_live_migrate_source') self.mox.StubOutWithMock(self.compute.driver, 'check_can_live_migrate_destination_cleanup') self.compute._get_compute_info(self.context, 'fake-host').AndReturn(src_info) self.compute._get_compute_info(self.context, CONF.host).AndReturn(dest_info) self.compute.driver.check_can_live_migrate_destination( self.context, instance, src_info, dest_info, block_migration, disk_over_commit).AndReturn(dest_check_data) mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source( self.context, instance, dest_check_data) if do_raise: mock_meth.AndRaise(test.TestingException()) self.mox.StubOutWithMock(db, 'instance_fault_create') db.instance_fault_create( self.context, mox.IgnoreArg()).AndReturn( test_instance_fault.fake_faults['fake-uuid'][0]) else: mock_meth.AndReturn(mig_data) self.compute.driver.check_can_live_migrate_destination_cleanup( self.context, dest_check_data) self.mox.ReplayAll() result = self.compute.check_can_live_migrate_destination( self.context, instance=instance, block_migration=block_migration, disk_over_commit=disk_over_commit) self.assertEqual(mig_data, result) event_mock.assert_called_once_with( self.context, 'compute_check_can_live_migrate_destination', instance.uuid) def test_check_can_live_migrate_destination_success(self): self._test_check_can_live_migrate_destination() def test_check_can_live_migrate_destination_fail(self): self.assertRaises( test.TestingException, self._test_check_can_live_migrate_destination, do_raise=True) @mock.patch('nova.compute.manager.InstanceEvents._lock_name') def test_prepare_for_instance_event(self, lock_name_mock): inst_obj = objects.Instance(uuid=uuids.instance) result = self.compute.instance_events.prepare_for_instance_event( inst_obj, 'test-event') self.assertIn(uuids.instance, self.compute.instance_events._events) self.assertIn('test-event', self.compute.instance_events._events[uuids.instance]) self.assertEqual( result, self.compute.instance_events._events[uuids.instance]['test-event']) self.assertTrue(hasattr(result, 'send')) lock_name_mock.assert_called_once_with(inst_obj) @mock.patch('nova.compute.manager.InstanceEvents._lock_name') def test_pop_instance_event(self, lock_name_mock): event = eventlet_event.Event() self.compute.instance_events._events = { uuids.instance: { 'network-vif-plugged': event, } } inst_obj = objects.Instance(uuid=uuids.instance) event_obj = objects.InstanceExternalEvent(name='network-vif-plugged', tag=None) result = self.compute.instance_events.pop_instance_event(inst_obj, event_obj) self.assertEqual(result, event) lock_name_mock.assert_called_once_with(inst_obj) @mock.patch('nova.compute.manager.InstanceEvents._lock_name') def test_clear_events_for_instance(self, lock_name_mock): event = eventlet_event.Event() self.compute.instance_events._events = { uuids.instance: { 'test-event': event, } } inst_obj = objects.Instance(uuid=uuids.instance) result = self.compute.instance_events.clear_events_for_instance( inst_obj) self.assertEqual(result, {'test-event': event}) lock_name_mock.assert_called_once_with(inst_obj) def 
test_instance_events_lock_name(self): inst_obj = objects.Instance(uuid=uuids.instance) result = self.compute.instance_events._lock_name(inst_obj) self.assertEqual(result, "%s-events" % uuids.instance) def test_prepare_for_instance_event_again(self): inst_obj = objects.Instance(uuid=uuids.instance) self.compute.instance_events.prepare_for_instance_event( inst_obj, 'test-event') # A second attempt will avoid creating a new list; make sure we # get the current list result = self.compute.instance_events.prepare_for_instance_event( inst_obj, 'test-event') self.assertIn(uuids.instance, self.compute.instance_events._events) self.assertIn('test-event', self.compute.instance_events._events[uuids.instance]) self.assertEqual( result, self.compute.instance_events._events[uuids.instance]['test-event']) self.assertTrue(hasattr(result, 'send')) def test_process_instance_event(self): event = eventlet_event.Event() self.compute.instance_events._events = { uuids.instance: { 'network-vif-plugged': event, } } inst_obj = objects.Instance(uuid=uuids.instance) event_obj = objects.InstanceExternalEvent(name='network-vif-plugged', tag=None) self.compute._process_instance_event(inst_obj, event_obj) self.assertTrue(event.ready()) self.assertEqual(event_obj, event.wait()) self.assertEqual({}, self.compute.instance_events._events) def test_process_instance_vif_deleted_event(self): vif1 = fake_network_cache_model.new_vif() vif1['id'] = '1' vif2 = fake_network_cache_model.new_vif() vif2['id'] = '2' nw_info = network_model.NetworkInfo([vif1, vif2]) info_cache = objects.InstanceInfoCache(network_info=nw_info, instance_uuid=uuids.instance) inst_obj = objects.Instance(id=3, uuid=uuids.instance, info_cache=info_cache) @mock.patch.object(manager.base_net_api, 'update_instance_cache_with_nw_info') @mock.patch.object(self.compute.driver, 'detach_interface') def do_test(detach_interface, update_instance_cache_with_nw_info): self.compute._process_instance_vif_deleted_event(self.context, inst_obj, vif2['id']) update_instance_cache_with_nw_info.assert_called_once_with( self.compute.network_api, self.context, inst_obj, nw_info=[vif1]) detach_interface.assert_called_once_with(inst_obj, vif2) do_test() def test_external_instance_event(self): instances = [ objects.Instance(id=1, uuid=uuids.instance_1), objects.Instance(id=2, uuid=uuids.instance_2), objects.Instance(id=3, uuid=uuids.instance_3)] events = [ objects.InstanceExternalEvent(name='network-changed', tag='tag1', instance_uuid=uuids.instance_1), objects.InstanceExternalEvent(name='network-vif-plugged', instance_uuid=uuids.instance_2, tag='tag2'), objects.InstanceExternalEvent(name='network-vif-deleted', instance_uuid=uuids.instance_3, tag='tag3')] @mock.patch.object(self.compute, '_process_instance_vif_deleted_event') @mock.patch.object(self.compute.network_api, 'get_instance_nw_info') @mock.patch.object(self.compute, '_process_instance_event') def do_test(_process_instance_event, get_instance_nw_info, _process_instance_vif_deleted_event): self.compute.external_instance_event(self.context, instances, events) get_instance_nw_info.assert_called_once_with(self.context, instances[0]) _process_instance_event.assert_called_once_with(instances[1], events[1]) _process_instance_vif_deleted_event.assert_called_once_with( self.context, instances[2], events[2].tag) do_test() def test_external_instance_event_with_exception(self): vif1 = fake_network_cache_model.new_vif() vif1['id'] = '1' vif2 = fake_network_cache_model.new_vif() vif2['id'] = '2' nw_info = network_model.NetworkInfo([vif1, 
vif2]) info_cache = objects.InstanceInfoCache(network_info=nw_info, instance_uuid=uuids.instance_2) instances = [ objects.Instance(id=1, uuid=uuids.instance_1), objects.Instance(id=2, uuid=uuids.instance_2, info_cache=info_cache), objects.Instance(id=3, uuid=uuids.instance_3)] events = [ objects.InstanceExternalEvent(name='network-changed', tag='tag1', instance_uuid=uuids.instance_1), objects.InstanceExternalEvent(name='network-vif-deleted', instance_uuid=uuids.instance_2, tag='2'), objects.InstanceExternalEvent(name='network-vif-plugged', instance_uuid=uuids.instance_3, tag='tag3')] # Make sure all the three events are handled despite the exceptions in # processing events 1 and 2 @mock.patch.object(manager.base_net_api, 'update_instance_cache_with_nw_info') @mock.patch.object(self.compute.driver, 'detach_interface', side_effect=exception.NovaException) @mock.patch.object(self.compute.network_api, 'get_instance_nw_info', side_effect=exception.InstanceInfoCacheNotFound( instance_uuid=uuids.instance_1)) @mock.patch.object(self.compute, '_process_instance_event') def do_test(_process_instance_event, get_instance_nw_info, detach_interface, update_instance_cache_with_nw_info): self.compute.external_instance_event(self.context, instances, events) get_instance_nw_info.assert_called_once_with(self.context, instances[0]) update_instance_cache_with_nw_info.assert_called_once_with( self.compute.network_api, self.context, instances[1], nw_info=[vif1]) detach_interface.assert_called_once_with(instances[1], vif2) _process_instance_event.assert_called_once_with(instances[2], events[2]) do_test() def test_cancel_all_events(self): inst = objects.Instance(uuid=uuids.instance) fake_eventlet_event = mock.MagicMock() self.compute.instance_events._events = { inst.uuid: { 'network-vif-plugged-bar': fake_eventlet_event, } } self.compute.instance_events.cancel_all_events() # call it again to make sure we handle that gracefully self.compute.instance_events.cancel_all_events() self.assertTrue(fake_eventlet_event.send.called) event = fake_eventlet_event.send.call_args_list[0][0][0] self.assertEqual('network-vif-plugged', event.name) self.assertEqual('bar', event.tag) self.assertEqual('failed', event.status) def test_cleanup_cancels_all_events(self): with mock.patch.object(self.compute, 'instance_events') as mock_ev: self.compute.cleanup_host() mock_ev.cancel_all_events.assert_called_once_with() def test_cleanup_blocks_new_events(self): instance = objects.Instance(uuid=uuids.instance) self.compute.instance_events.cancel_all_events() callback = mock.MagicMock() body = mock.MagicMock() with self.compute.virtapi.wait_for_instance_event( instance, ['network-vif-plugged-bar'], error_callback=callback): body() self.assertTrue(body.called) callback.assert_called_once_with('network-vif-plugged-bar', instance) def test_pop_events_fails_gracefully(self): inst = objects.Instance(uuid=uuids.instance) event = mock.MagicMock() self.compute.instance_events._events = None self.assertIsNone( self.compute.instance_events.pop_instance_event(inst, event)) def test_clear_events_fails_gracefully(self): inst = objects.Instance(uuid=uuids.instance) self.compute.instance_events._events = None self.assertEqual( self.compute.instance_events.clear_events_for_instance(inst), {}) def test_retry_reboot_pending_soft(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_PENDING instance.vm_state = vm_states.ACTIVE with mock.patch.object(self.compute, '_get_power_state', 
return_value=power_state.RUNNING): allow_reboot, reboot_type = self.compute._retry_reboot( context, instance) self.assertTrue(allow_reboot) self.assertEqual(reboot_type, 'SOFT') def test_retry_reboot_pending_hard(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_PENDING_HARD instance.vm_state = vm_states.ACTIVE with mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING): allow_reboot, reboot_type = self.compute._retry_reboot( context, instance) self.assertTrue(allow_reboot) self.assertEqual(reboot_type, 'HARD') def test_retry_reboot_starting_soft_off(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_STARTED with mock.patch.object(self.compute, '_get_power_state', return_value=power_state.NOSTATE): allow_reboot, reboot_type = self.compute._retry_reboot( context, instance) self.assertTrue(allow_reboot) self.assertEqual(reboot_type, 'HARD') def test_retry_reboot_starting_hard_off(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_STARTED_HARD with mock.patch.object(self.compute, '_get_power_state', return_value=power_state.NOSTATE): allow_reboot, reboot_type = self.compute._retry_reboot( context, instance) self.assertTrue(allow_reboot) self.assertEqual(reboot_type, 'HARD') def test_retry_reboot_starting_hard_on(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = task_states.REBOOT_STARTED_HARD with mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING): allow_reboot, reboot_type = self.compute._retry_reboot( context, instance) self.assertFalse(allow_reboot) self.assertEqual(reboot_type, 'HARD') def test_retry_reboot_no_reboot(self): instance = objects.Instance(self.context) instance.uuid = uuids.instance instance.task_state = 'bar' with mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING): allow_reboot, reboot_type = self.compute._retry_reboot( context, instance) self.assertFalse(allow_reboot) self.assertEqual(reboot_type, 'HARD') @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance') @mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume') @mock.patch('nova.objects.Instance._from_db_object') def test_remove_volume_connection(self, inst_from_db, detach, bdm_get): bdm = mock.sentinel.bdm bdm.connection_info = jsonutils.dumps({}) inst_obj = mock.Mock() inst_obj.uuid = 'uuid' bdm_get.return_value = bdm inst_from_db.return_value = inst_obj with mock.patch.object(self.compute, 'volume_api'): self.compute.remove_volume_connection(self.context, 'vol', inst_obj) detach.assert_called_once_with(self.context, inst_obj, bdm, {}) bdm_get.assert_called_once_with(self.context, 'vol', 'uuid') def test_detach_volume(self): self._test_detach_volume() def test_detach_volume_not_destroy_bdm(self): self._test_detach_volume(destroy_bdm=False) @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance') @mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume') @mock.patch('nova.compute.manager.ComputeManager.' 
'_notify_about_instance_usage') def _test_detach_volume(self, notify_inst_usage, detach, bdm_get, destroy_bdm=True): volume_id = uuids.volume inst_obj = mock.Mock() inst_obj.uuid = uuids.instance inst_obj.host = CONF.host attachment_id = uuids.attachment bdm = mock.MagicMock(spec=objects.BlockDeviceMapping) bdm.device_name = 'vdb' bdm.connection_info = jsonutils.dumps({}) bdm_get.return_value = bdm detach.return_value = {} with mock.patch.object(self.compute, 'volume_api') as volume_api: with mock.patch.object(self.compute, 'driver') as driver: connector_sentinel = mock.sentinel.connector driver.get_volume_connector.return_value = connector_sentinel self.compute._detach_volume(self.context, volume_id, inst_obj, destroy_bdm=destroy_bdm, attachment_id=attachment_id) detach.assert_called_once_with(self.context, inst_obj, bdm, {}) driver.get_volume_connector.assert_called_once_with(inst_obj) volume_api.terminate_connection.assert_called_once_with( self.context, volume_id, connector_sentinel) volume_api.detach.assert_called_once_with(mock.ANY, volume_id, inst_obj.uuid, attachment_id) notify_inst_usage.assert_called_once_with( self.context, inst_obj, "volume.detach", extra_usage_info={'volume_id': volume_id} ) if destroy_bdm: bdm.destroy.assert_called_once_with() else: self.assertFalse(bdm.destroy.called) def test_detach_volume_evacuate(self): """For evacuate, terminate_connection is called with original host.""" expected_connector = {'host': 'evacuated-host'} conn_info_str = '{"connector": {"host": "evacuated-host"}}' self._test_detach_volume_evacuate(conn_info_str, expected=expected_connector) def test_detach_volume_evacuate_legacy(self): """Test coverage for evacuate with legacy attachments. In this case, legacy means the volume was attached to the instance before nova stashed the connector in connection_info. The connector sent to terminate_connection will still be for the local host in this case because nova does not have the info to get the connector for the original (evacuated) host. """ conn_info_str = '{"foo": "bar"}' # Has no 'connector'. self._test_detach_volume_evacuate(conn_info_str) def test_detach_volume_evacuate_mismatch(self): """Test coverage for evacuate with connector mismatch. For evacuate, if the stashed connector also has the wrong host, then log it and stay with the local connector. """ conn_info_str = '{"connector": {"host": "other-host"}}' self._test_detach_volume_evacuate(conn_info_str) @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance') @mock.patch('nova.compute.manager.ComputeManager.' '_notify_about_instance_usage') def _test_detach_volume_evacuate(self, conn_info_str, notify_inst_usage, bdm_get, expected=None): """Re-usable code for detach volume evacuate test cases. :param conn_info_str: String form of the stashed connector. :param expected: Dict of the connector that is expected in the terminate call (optional). Default is to expect the local connector to be used. 
""" volume_id = 'vol_id' instance = fake_instance.fake_instance_obj(self.context, host='evacuated-host') bdm = mock.Mock() bdm.connection_info = conn_info_str bdm_get.return_value = bdm local_connector = {'host': 'local-connector-host'} expected_connector = local_connector if not expected else expected with mock.patch.object(self.compute, 'volume_api') as volume_api: with mock.patch.object(self.compute, 'driver') as driver: driver.get_volume_connector.return_value = local_connector self.compute._detach_volume(self.context, volume_id, instance, destroy_bdm=False) driver._driver_detach_volume.assert_not_called() driver.get_volume_connector.assert_called_once_with(instance) volume_api.terminate_connection.assert_called_once_with( self.context, volume_id, expected_connector) volume_api.detach.assert_called_once_with(mock.ANY, volume_id, instance.uuid, None) notify_inst_usage.assert_called_once_with( self.context, instance, "volume.detach", extra_usage_info={'volume_id': volume_id} ) def _test_rescue(self, clean_shutdown=True): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE) fake_nw_info = network_model.NetworkInfo() rescue_image_meta = objects.ImageMeta.from_dict( {'id': 'fake', 'name': 'fake'}) with test.nested( mock.patch.object(self.context, 'elevated', return_value=self.context), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=fake_nw_info), mock.patch.object(self.compute, '_get_rescue_image', return_value=rescue_image_meta), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_power_off_instance'), mock.patch.object(self.compute.driver, 'rescue'), mock.patch.object(compute_utils, 'notify_usage_exists'), mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING), mock.patch.object(instance, 'save') ) as ( elevated_context, get_nw_info, get_rescue_image, notify_instance_usage, power_off_instance, driver_rescue, notify_usage_exists, get_power_state, instance_save ): self.compute.rescue_instance( self.context, instance, rescue_password='verybadpass', rescue_image_ref=None, clean_shutdown=clean_shutdown) # assert the field values on the instance object self.assertEqual(vm_states.RESCUED, instance.vm_state) self.assertIsNone(instance.task_state) self.assertEqual(power_state.RUNNING, instance.power_state) self.assertIsNotNone(instance.launched_at) # assert our mock calls get_nw_info.assert_called_once_with(self.context, instance) get_rescue_image.assert_called_once_with( self.context, instance, None) extra_usage_info = {'rescue_image_name': 'fake'} notify_calls = [ mock.call(self.context, instance, "rescue.start", extra_usage_info=extra_usage_info, network_info=fake_nw_info), mock.call(self.context, instance, "rescue.end", extra_usage_info=extra_usage_info, network_info=fake_nw_info) ] notify_instance_usage.assert_has_calls(notify_calls) power_off_instance.assert_called_once_with(self.context, instance, clean_shutdown) driver_rescue.assert_called_once_with( self.context, instance, fake_nw_info, rescue_image_meta, 'verybadpass') notify_usage_exists.assert_called_once_with(self.compute.notifier, self.context, instance, current_period=True) instance_save.assert_called_once_with( expected_task_state=task_states.RESCUING) def test_rescue(self): self._test_rescue() def test_rescue_forced_shutdown(self): self._test_rescue(clean_shutdown=False) def test_unrescue(self): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.RESCUED) 
fake_nw_info = network_model.NetworkInfo() with test.nested( mock.patch.object(self.context, 'elevated', return_value=self.context), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=fake_nw_info), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute.driver, 'unrescue'), mock.patch.object(self.compute, '_get_power_state', return_value=power_state.RUNNING), mock.patch.object(instance, 'save') ) as ( elevated_context, get_nw_info, notify_instance_usage, driver_unrescue, get_power_state, instance_save ): self.compute.unrescue_instance(self.context, instance) # assert the field values on the instance object self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) self.assertEqual(power_state.RUNNING, instance.power_state) # assert our mock calls get_nw_info.assert_called_once_with(self.context, instance) notify_calls = [ mock.call(self.context, instance, "unrescue.start", network_info=fake_nw_info), mock.call(self.context, instance, "unrescue.end", network_info=fake_nw_info) ] notify_instance_usage.assert_has_calls(notify_calls) driver_unrescue.assert_called_once_with(instance, fake_nw_info) instance_save.assert_called_once_with( expected_task_state=task_states.UNRESCUING) @mock.patch('nova.compute.manager.ComputeManager._get_power_state', return_value=power_state.RUNNING) @mock.patch.object(objects.Instance, 'save') @mock.patch('nova.utils.generate_password', return_value='fake-pass') def test_set_admin_password(self, gen_password_mock, instance_save_mock, power_state_mock): # Ensure instance can have its admin password set. instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE, task_state=task_states.UPDATING_PASSWORD) @mock.patch.object(self.context, 'elevated', return_value=self.context) @mock.patch.object(self.compute.driver, 'set_admin_password') def do_test(driver_mock, elevated_mock): # call the manager method self.compute.set_admin_password(self.context, instance, None) # make our assertions self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertIsNone(instance.task_state) power_state_mock.assert_called_once_with(self.context, instance) driver_mock.assert_called_once_with(instance, 'fake-pass') instance_save_mock.assert_called_once_with( expected_task_state=task_states.UPDATING_PASSWORD) do_test() @mock.patch('nova.compute.manager.ComputeManager._get_power_state', return_value=power_state.NOSTATE) @mock.patch('nova.compute.manager.ComputeManager._instance_update') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') def test_set_admin_password_bad_state(self, add_fault_mock, instance_save_mock, update_mock, power_state_mock): # Test setting password while instance is rebuilding. 
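# The reported power state is mocked to NOSTATE above, so the manager is expected to refuse the request, raise InstancePasswordSetFailed and record an instance fault.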
instance = fake_instance.fake_instance_obj(self.context) with mock.patch.object(self.context, 'elevated', return_value=self.context): # call the manager method self.assertRaises(exception.InstancePasswordSetFailed, self.compute.set_admin_password, self.context, instance, None) # make our assertions power_state_mock.assert_called_once_with(self.context, instance) instance_save_mock.assert_called_once_with( expected_task_state=task_states.UPDATING_PASSWORD) add_fault_mock.assert_called_once_with( self.context, instance, mock.ANY, mock.ANY) @mock.patch('nova.utils.generate_password', return_value='fake-pass') @mock.patch('nova.compute.manager.ComputeManager._get_power_state', return_value=power_state.RUNNING) @mock.patch('nova.compute.manager.ComputeManager._instance_update') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state, expected_task_state, expected_exception, add_fault_mock, instance_save_mock, update_mock, power_state_mock, gen_password_mock): # Ensure expected exception is raised if set_admin_password fails. instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE, task_state=task_states.UPDATING_PASSWORD) @mock.patch.object(self.context, 'elevated', return_value=self.context) @mock.patch.object(self.compute.driver, 'set_admin_password', side_effect=exc) def do_test(driver_mock, elevated_mock): # error raised from the driver should not reveal internal # information so a new error is raised self.assertRaises(expected_exception, self.compute.set_admin_password, self.context, instance=instance, new_pass=None) if expected_exception == NotImplementedError: instance_save_mock.assert_called_once_with( expected_task_state=task_states.UPDATING_PASSWORD) else: # setting the instance to error state instance_save_mock.assert_called_once_with() self.assertEqual(expected_vm_state, instance.vm_state) # check revert_task_state decorator update_mock.assert_called_once_with( self.context, instance, task_state=expected_task_state) # check wrap_instance_fault decorator add_fault_mock.assert_called_once_with( self.context, instance, mock.ANY, mock.ANY) do_test() def test_set_admin_password_driver_not_authorized(self): # Ensure expected exception is raised if set_admin_password not # authorized. exc = exception.Forbidden('Internal error') expected_exception = exception.InstancePasswordSetFailed self._do_test_set_admin_password_driver_error( exc, vm_states.ERROR, None, expected_exception) def test_set_admin_password_driver_not_implemented(self): # Ensure expected exception is raised if set_admin_password not # implemented by driver. 
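# NotImplementedError is expected to propagate unchanged and the instance should remain ACTIVE instead of being put into ERROR.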
exc = NotImplementedError() expected_exception = NotImplementedError self._do_test_set_admin_password_driver_error( exc, vm_states.ACTIVE, None, expected_exception) def test_destroy_evacuated_instances(self): our_host = self.compute.host instance_1 = objects.Instance(self.context) instance_1.uuid = uuids.instance_1 instance_1.task_state = None instance_1.vm_state = vm_states.ACTIVE instance_1.host = 'not-' + our_host instance_2 = objects.Instance(self.context) instance_2.uuid = uuids.instance_2 instance_2.task_state = None instance_2.vm_state = vm_states.ACTIVE instance_2.host = 'not-' + our_host # Only instance 2 has a migration record migration = objects.Migration(instance_uuid=instance_2.uuid) # Consider the migration successful migration.status = 'done' with test.nested( mock.patch.object(self.compute, '_get_instances_on_driver', return_value=[instance_1, instance_2]), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=None), mock.patch.object(self.compute, '_get_instance_block_device_info', return_value={}), mock.patch.object(self.compute, '_is_instance_storage_shared', return_value=False), mock.patch.object(self.compute.driver, 'destroy'), mock.patch('nova.objects.MigrationList.get_by_filters'), mock.patch('nova.objects.Migration.save') ) as (_get_instances_on_driver, get_instance_nw_info, _get_instance_block_device_info, _is_instance_storage_shared, destroy, migration_list, migration_save): migration_list.return_value = [migration] self.compute._destroy_evacuated_instances(self.context) # Only instance 2 should be deleted. Instance 1 is still running # here, but no migration from our host exists, so ignore it destroy.assert_called_once_with(self.context, instance_2, None, {}, True) @mock.patch('nova.compute.manager.ComputeManager.' '_destroy_evacuated_instances') @mock.patch('nova.compute.manager.LOG') def test_init_host_foreign_instance(self, mock_log, mock_destroy): inst = mock.MagicMock() inst.host = self.compute.host + '-alt' self.compute._init_instance(mock.sentinel.context, inst) self.assertFalse(inst.save.called) self.assertTrue(mock_log.warning.called) msg = mock_log.warning.call_args_list[0] self.assertIn('appears to not be owned by this host', msg[0][0]) @mock.patch('nova.compute.manager.ComputeManager._instance_update') def test_error_out_instance_on_exception_not_implemented_err(self, inst_update_mock): instance = fake_instance.fake_instance_obj(self.context) def do_test(): with self.compute._error_out_instance_on_exception( self.context, instance, instance_state=vm_states.STOPPED): raise NotImplementedError('test') self.assertRaises(NotImplementedError, do_test) inst_update_mock.assert_called_once_with( self.context, instance, vm_state=vm_states.STOPPED, task_state=None) @mock.patch('nova.compute.manager.ComputeManager._instance_update') def test_error_out_instance_on_exception_inst_fault_rollback(self, inst_update_mock): instance = fake_instance.fake_instance_obj(self.context) def do_test(): with self.compute._error_out_instance_on_exception(self.context, instance): raise exception.InstanceFaultRollback( inner_exception=test.TestingException('test')) self.assertRaises(test.TestingException, do_test) inst_update_mock.assert_called_once_with( self.context, instance, vm_state=vm_states.ACTIVE, task_state=None) @mock.patch('nova.compute.manager.ComputeManager.' 
'_set_instance_obj_error_state') def test_error_out_instance_on_exception_unknown_with_quotas(self, set_error): instance = fake_instance.fake_instance_obj(self.context) quotas = mock.create_autospec(objects.Quotas, spec_set=True) def do_test(): with self.compute._error_out_instance_on_exception( self.context, instance, quotas): raise test.TestingException('test') self.assertRaises(test.TestingException, do_test) self.assertEqual(1, len(quotas.method_calls)) self.assertEqual(mock.call.rollback(), quotas.method_calls[0]) set_error.assert_called_once_with(self.context, instance) def test_cleanup_volumes(self): instance = fake_instance.fake_instance_obj(self.context) bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 'fake-id1', 'source_type': 'image', 'delete_on_termination': False}) bdm_delete_dict = fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 'fake-id2', 'source_type': 'image', 'delete_on_termination': True}) bdms = block_device_obj.block_device_make_list(self.context, [bdm_do_not_delete_dict, bdm_delete_dict]) with mock.patch.object(self.compute.volume_api, 'delete') as volume_delete: self.compute._cleanup_volumes(self.context, instance.uuid, bdms) volume_delete.assert_called_once_with(self.context, bdms[1].volume_id) def test_cleanup_volumes_exception_do_not_raise(self): instance = fake_instance.fake_instance_obj(self.context) bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 'fake-id1', 'source_type': 'image', 'delete_on_termination': True}) bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 'fake-id2', 'source_type': 'image', 'delete_on_termination': True}) bdms = block_device_obj.block_device_make_list(self.context, [bdm_dict1, bdm_dict2]) with mock.patch.object(self.compute.volume_api, 'delete', side_effect=[test.TestingException(), None]) as volume_delete: self.compute._cleanup_volumes(self.context, instance.uuid, bdms, raise_exc=False) calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms] self.assertEqual(calls, volume_delete.call_args_list) def test_cleanup_volumes_exception_raise(self): instance = fake_instance.fake_instance_obj(self.context) bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 'fake-id1', 'source_type': 'image', 'delete_on_termination': True}) bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 'fake-id2', 'source_type': 'image', 'delete_on_termination': True}) bdms = block_device_obj.block_device_make_list(self.context, [bdm_dict1, bdm_dict2]) with mock.patch.object(self.compute.volume_api, 'delete', side_effect=[test.TestingException(), None]) as volume_delete: self.assertRaises(test.TestingException, self.compute._cleanup_volumes, self.context, instance.uuid, bdms) calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms] self.assertEqual(calls, volume_delete.call_args_list) def test_stop_instance_task_state_none_power_state_shutdown(self): # Tests that stop_instance doesn't puke when the instance power_state # is shutdown and the task_state is None. 
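# The driver reports SHUTDOWN below, so the stop should complete cleanly and leave the instance in the STOPPED state.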
instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE, task_state=None, power_state=power_state.SHUTDOWN) @mock.patch.object(self.compute, '_get_power_state', return_value=power_state.SHUTDOWN) @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_power_off_instance') @mock.patch.object(instance, 'save') def do_test(save_mock, power_off_mock, notify_mock, get_state_mock): # run the code self.compute.stop_instance(self.context, instance, True) # assert the calls self.assertEqual(2, get_state_mock.call_count) notify_mock.assert_has_calls([ mock.call(self.context, instance, 'power_off.start'), mock.call(self.context, instance, 'power_off.end') ]) power_off_mock.assert_called_once_with( self.context, instance, True) save_mock.assert_called_once_with( expected_task_state=[task_states.POWERING_OFF, None]) self.assertEqual(power_state.SHUTDOWN, instance.power_state) self.assertIsNone(instance.task_state) self.assertEqual(vm_states.STOPPED, instance.vm_state) do_test() def test_reset_network_driver_not_implemented(self): instance = fake_instance.fake_instance_obj(self.context) @mock.patch.object(self.compute.driver, 'reset_network', side_effect=NotImplementedError()) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') def do_test(mock_add_fault, mock_reset): self.assertRaises(messaging.ExpectedException, self.compute.reset_network, self.context, instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.reset_network, self.context, instance) do_test() def _test_rebuild_ex(self, instance, ex): # Test that we do not raise on certain exceptions with test.nested( mock.patch.object(self.compute, '_get_compute_info'), mock.patch.object(self.compute, '_do_rebuild_instance_with_claim', side_effect=ex), mock.patch.object(self.compute, '_set_migration_status'), mock.patch.object(self.compute, '_notify_about_instance_usage') ) as (mock_get, mock_rebuild, mock_set, mock_notify): self.compute.rebuild_instance(self.context, instance, None, None, None, None, None, None, None) mock_set.assert_called_once_with(None, 'failed') mock_notify.assert_called_once_with(mock.ANY, instance, 'rebuild.error', fault=ex) def test_rebuild_deleting(self): instance = objects.Instance(uuid='fake-uuid') ex = exception.UnexpectedDeletingTaskStateError( instance_uuid=instance.uuid, expected='expected', actual='actual') self._test_rebuild_ex(instance, ex) def test_rebuild_notfound(self): instance = objects.Instance(uuid='fake-uuid') ex = exception.InstanceNotFound(instance_id=instance.uuid) self._test_rebuild_ex(instance, ex) def test_rebuild_default_impl(self): def _detach(context, bdms): # NOTE(rpodolyaka): check that instance has been powered off by # the time we detach block devices, exact calls arguments will be # checked below self.assertTrue(mock_power_off.called) self.assertFalse(mock_destroy.called) def _attach(context, instance, bdms, do_check_attach=True): return {'block_device_mapping': 'shared_block_storage'} def _spawn(context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): self.assertEqual(block_device_info['block_device_mapping'], 'shared_block_storage') with test.nested( mock.patch.object(self.compute.driver, 'destroy', return_value=None), mock.patch.object(self.compute.driver, 'spawn', side_effect=_spawn), mock.patch.object(objects.Instance, 'save', return_value=None), mock.patch.object(self.compute, '_power_off_instance', 
return_value=None) ) as( mock_destroy, mock_spawn, mock_save, mock_power_off ): instance = fake_instance.fake_instance_obj(self.context) instance.migration_context = None instance.numa_topology = None instance.task_state = task_states.REBUILDING instance.save(expected_task_state=[task_states.REBUILDING]) self.compute._rebuild_default_impl(self.context, instance, None, [], admin_password='new_pass', bdms=[], detach_block_devices=_detach, attach_block_devices=_attach, network_info=None, recreate=False, block_device_info=None, preserve_ephemeral=False) self.assertTrue(mock_save.called) self.assertTrue(mock_spawn.called) mock_destroy.assert_called_once_with( self.context, instance, network_info=None, block_device_info=None) mock_power_off.assert_called_once_with( self.context, instance, clean_shutdown=True) @mock.patch.object(utils, 'last_completed_audit_period', return_value=(0, 0)) @mock.patch.object(time, 'time', side_effect=[10, 20, 21]) @mock.patch.object(objects.InstanceList, 'get_by_host', return_value=[]) @mock.patch.object(objects.BandwidthUsage, 'get_by_instance_uuid_and_mac') @mock.patch.object(db, 'bw_usage_update') def test_poll_bandwidth_usage(self, bw_usage_update, get_by_uuid_mac, get_by_host, time, last_completed_audit): bw_counters = [{'uuid': uuids.instance, 'mac_address': 'fake-mac', 'bw_in': 1, 'bw_out': 2}] usage = objects.BandwidthUsage() usage.bw_in = 3 usage.bw_out = 4 usage.last_ctr_in = 0 usage.last_ctr_out = 0 self.flags(bandwidth_poll_interval=1) get_by_uuid_mac.return_value = usage _time = timeutils.utcnow() bw_usage_update.return_value = {'uuid': uuids.instance, 'mac': '', 'start_period': _time, 'last_refreshed': _time, 'bw_in': 0, 'bw_out': 0, 'last_ctr_in': 0, 'last_ctr_out': 0, 'deleted': 0, 'created_at': _time, 'updated_at': _time, 'deleted_at': _time} with mock.patch.object(self.compute.driver, 'get_all_bw_counters', return_value=bw_counters): self.compute._poll_bandwidth_usage(self.context) get_by_uuid_mac.assert_called_once_with(self.context, uuids.instance, 'fake-mac', start_period=0, use_slave=True) # NOTE(sdague): bw_usage_update happens at some time in # the future, so what last_refreshed is irrelevant. bw_usage_update.assert_called_once_with(self.context, uuids.instance, 'fake-mac', 0, 4, 6, 1, 2, last_refreshed=mock.ANY, update_cells=False) def test_reverts_task_state_instance_not_found(self): # Tests that the reverts_task_state decorator in the compute manager # will not trace when an InstanceNotFound is raised. 
instance = objects.Instance(uuid=uuids.instance, task_state="FAKE") instance_update_mock = mock.Mock( side_effect=exception.InstanceNotFound(instance_id=instance.uuid)) self.compute._instance_update = instance_update_mock log_mock = mock.Mock() manager.LOG = log_mock @manager.reverts_task_state def fake_function(self, context, instance): raise test.TestingException() self.assertRaises(test.TestingException, fake_function, self, self.context, instance) self.assertFalse(log_mock.called) @mock.patch.object(nova.scheduler.client.SchedulerClient, 'update_instance_info') def test_update_scheduler_instance_info(self, mock_update): instance = objects.Instance(uuid=uuids.instance) self.compute._update_scheduler_instance_info(self.context, instance) self.assertEqual(mock_update.call_count, 1) args = mock_update.call_args[0] self.assertNotEqual(args[0], self.context) self.assertIsInstance(args[0], self.context.__class__) self.assertEqual(args[1], self.compute.host) # Send a single instance; check that the method converts to an # InstanceList self.assertIsInstance(args[2], objects.InstanceList) self.assertEqual(args[2].objects[0], instance) @mock.patch.object(nova.scheduler.client.SchedulerClient, 'delete_instance_info') def test_delete_scheduler_instance_info(self, mock_delete): self.compute._delete_scheduler_instance_info(self.context, mock.sentinel.inst_uuid) self.assertEqual(mock_delete.call_count, 1) args = mock_delete.call_args[0] self.assertNotEqual(args[0], self.context) self.assertIsInstance(args[0], self.context.__class__) self.assertEqual(args[1], self.compute.host) self.assertEqual(args[2], mock.sentinel.inst_uuid) @mock.patch.object(nova.context.RequestContext, 'elevated') @mock.patch.object(nova.objects.InstanceList, 'get_by_host') @mock.patch.object(nova.scheduler.client.SchedulerClient, 'sync_instance_info') def test_sync_scheduler_instance_info(self, mock_sync, mock_get_by_host, mock_elevated): inst1 = objects.Instance(uuid=uuids.instance_1) inst2 = objects.Instance(uuid=uuids.instance_2) inst3 = objects.Instance(uuid=uuids.instance_3) exp_uuids = [inst.uuid for inst in [inst1, inst2, inst3]] mock_get_by_host.return_value = objects.InstanceList( objects=[inst1, inst2, inst3]) fake_elevated = context.get_admin_context() mock_elevated.return_value = fake_elevated self.compute._sync_scheduler_instance_info(self.context) mock_get_by_host.assert_called_once_with( fake_elevated, self.compute.host, expected_attrs=[], use_slave=True) mock_sync.assert_called_once_with(fake_elevated, self.compute.host, exp_uuids) @mock.patch.object(nova.scheduler.client.SchedulerClient, 'sync_instance_info') @mock.patch.object(nova.scheduler.client.SchedulerClient, 'delete_instance_info') @mock.patch.object(nova.scheduler.client.SchedulerClient, 'update_instance_info') def test_scheduler_info_updates_off(self, mock_update, mock_delete, mock_sync): mgr = self.compute mgr.send_instance_updates = False mgr._update_scheduler_instance_info(self.context, mock.sentinel.instance) mgr._delete_scheduler_instance_info(self.context, mock.sentinel.instance_uuid) mgr._sync_scheduler_instance_info(self.context) # None of the calls should have been made self.assertFalse(mock_update.called) self.assertFalse(mock_delete.called) self.assertFalse(mock_sync.called) def test_refresh_instance_security_rules_takes_non_object(self): inst = fake_instance.fake_db_instance() with mock.patch.object(self.compute.driver, 'refresh_instance_security_rules') as mock_r: self.compute.refresh_instance_security_rules(self.context, inst) 
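# The db-style dict passed in above should reach the driver as a full Instance object.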
self.assertIsInstance(mock_r.call_args_list[0][0][0], objects.Instance) def test_set_instance_obj_error_state_with_clean_task_state(self): instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING) with mock.patch.object(instance, 'save'): self.compute._set_instance_obj_error_state(self.context, instance, clean_task_state=True) self.assertEqual(vm_states.ERROR, instance.vm_state) self.assertIsNone(instance.task_state) def test_set_instance_obj_error_state_by_default(self): instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING) with mock.patch.object(instance, 'save'): self.compute._set_instance_obj_error_state(self.context, instance) self.assertEqual(vm_states.ERROR, instance.vm_state) self.assertEqual(task_states.SPAWNING, instance.task_state) @mock.patch.object(objects.Instance, 'save') def test_instance_update(self, mock_save): instance = objects.Instance(task_state=task_states.SCHEDULING, vm_state=vm_states.BUILDING) updates = {'task_state': None, 'vm_state': vm_states.ERROR} with mock.patch.object(self.compute, '_update_resource_tracker') as mock_rt: self.compute._instance_update(self.context, instance, **updates) self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ERROR, instance.vm_state) mock_save.assert_called_once_with() mock_rt.assert_called_once_with(self.context, instance) def test_reset_reloads_rpcapi(self): orig_rpc = self.compute.compute_rpcapi with mock.patch('nova.compute.rpcapi.ComputeAPI') as mock_rpc: self.compute.reset() mock_rpc.assert_called_once_with() self.assertIsNot(orig_rpc, self.compute.compute_rpcapi) @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') @mock.patch('nova.compute.manager.ComputeManager._delete_instance') def test_terminate_instance_no_bdm_volume_id(self, mock_delete_instance, mock_bdm_get_by_inst): # Tests that we refresh the bdm list if a volume bdm does not have the # volume_id set. 
instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ERROR, task_state=task_states.DELETING) bdm = fake_block_device.FakeDbBlockDeviceDict( {'source_type': 'snapshot', 'destination_type': 'volume', 'instance_uuid': instance.uuid, 'device_name': '/dev/vda'}) bdms = block_device_obj.block_device_make_list(self.context, [bdm]) # since the bdms passed in don't have a volume_id, we'll go back to the # database looking for updated versions mock_bdm_get_by_inst.return_value = bdms self.compute.terminate_instance(self.context, instance, bdms, []) mock_bdm_get_by_inst.assert_called_once_with( self.context, instance.uuid) mock_delete_instance.assert_called_once_with( self.context, instance, bdms, mock.ANY) @mock.patch.object(nova.compute.manager.ComputeManager, '_notify_about_instance_usage') def test_trigger_crash_dump(self, notify_mock): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE) self.compute.trigger_crash_dump(self.context, instance) notify_mock.assert_has_calls([ mock.call(self.context, instance, 'trigger_crash_dump.start'), mock.call(self.context, instance, 'trigger_crash_dump.end') ]) self.assertIsNone(instance.task_state) self.assertEqual(vm_states.ACTIVE, instance.vm_state) class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): def setUp(self): super(ComputeManagerBuildInstanceTestCase, self).setUp() self.compute = importutils.import_object(CONF.compute_manager) self.context = context.RequestContext('fake', 'fake') self.instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, expected_attrs=['metadata', 'system_metadata', 'info_cache']) self.admin_pass = 'pass' self.injected_files = [] self.image = {} self.node = 'fake-node' self.limits = {} self.requested_networks = [] self.security_groups = [] self.block_device_mapping = [] self.filter_properties = {'retry': {'num_attempts': 1, 'hosts': [[self.compute.host, 'fake-node']]}} self.useFixture(fixtures.SpawnIsSynchronousFixture()) def fake_network_info(): return network_model.NetworkInfo([{'address': '1.2.3.4'}]) self.network_info = network_model.NetworkInfoAsyncWrapper( fake_network_info) self.block_device_info = self.compute._prep_block_device(context, self.instance, self.block_device_mapping) # override tracker with a version that doesn't need the database: fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host, self.compute.driver, self.node) self.compute._resource_tracker_dict[self.node] = fake_rt def _do_build_instance_update(self, reschedule_update=False): self.mox.StubOutWithMock(self.instance, 'save') self.instance.save( expected_task_state=(task_states.SCHEDULING, None)).AndReturn( self.instance) if reschedule_update: self.instance.save().AndReturn(self.instance) def _build_and_run_instance_update(self): self.mox.StubOutWithMock(self.instance, 'save') self._build_resources_instance_update(stub=False) self.instance.save(expected_task_state= task_states.BLOCK_DEVICE_MAPPING).AndReturn(self.instance) def _build_resources_instance_update(self, stub=True): if stub: self.mox.StubOutWithMock(self.instance, 'save') self.instance.save().AndReturn(self.instance) def _notify_about_instance_usage(self, event, stub=True, **kwargs): if stub: self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.compute._notify_about_instance_usage(self.context, self.instance, event, **kwargs) def _instance_action_events(self): self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_start') 
        self.mox.StubOutWithMock(objects.InstanceActionEvent,
                'event_finish_with_failure')
        objects.InstanceActionEvent.event_start(
                self.context, self.instance.uuid, mox.IgnoreArg(),
                want_result=False)
        objects.InstanceActionEvent.event_finish_with_failure(
                self.context, self.instance.uuid, mox.IgnoreArg(),
                exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(),
                want_result=False)

    @staticmethod
    def _assert_build_instance_hook_called(mock_hooks, result):
        # NOTE(coreywright): we want to test the return value of
        # _do_build_and_run_instance, but it doesn't bubble all the way up, so
        # mock the hooking, which allows us to test that too, though a little
        # too intimately
        mock_hooks.setdefault().run_post.assert_called_once_with(
            'build_instance', result, mock.ANY, mock.ANY, f=None)

    @mock.patch('nova.hooks._HOOKS')
    def test_build_and_run_instance_called_with_proper_args(self, mock_hooks):
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self._do_build_instance_update()
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping,
                node=self.node, limits=self.limits)
        self._assert_build_instance_hook_called(mock_hooks,
                                                build_results.ACTIVE)

    # This test verifies that, when an icehouse-compatible RPC call is sent
    # to a juno compute node, the NetworkRequest object can be loaded from a
    # three-item tuple.
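    # Hedged illustration (not part of the test): an icehouse-era caller
    # passes each requested network as a plain three-item tuple, e.g.
    # ('fake_network_id', '10.0.0.1', uuids.port_instance), and newer code is
    # expected to hydrate that into the equivalent object, i.e.
    # objects.NetworkRequest(network_id='fake_network_id',
    # address='10.0.0.1', port_id=uuids.port_instance), which is exactly what
    # the assertions below verify on the value handed to
    # _build_and_run_instance.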
@mock.patch('nova.objects.Instance.save') @mock.patch('nova.compute.manager.ComputeManager._build_and_run_instance') def test_build_and_run_instance_with_icehouse_requested_network( self, mock_build_and_run, mock_save): fake_server_actions.stub_out_action_events(self.stubs) mock_save.return_value = self.instance self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=[objects.NetworkRequest( network_id='fake_network_id', address='10.0.0.1', port_id=uuids.port_instance)], security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) requested_network = mock_build_and_run.call_args[0][5][0] self.assertEqual('fake_network_id', requested_network.network_id) self.assertEqual('10.0.0.1', str(requested_network.address)) self.assertEqual(uuids.port_instance, requested_network.port_id) @mock.patch('nova.hooks._HOOKS') def test_build_abort_exception(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self._do_build_instance_update() self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.BuildAbortException(reason='', instance_uuid=self.instance.uuid)) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) self.compute._cleanup_volumes(self.context, self.instance.uuid, self.block_device_mapping, raise_exc=False) compute_utils.add_instance_fault_from_exc(self.context, self.instance, mox.IgnoreArg(), mox.IgnoreArg()) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute._set_instance_obj_error_state(self.context, self.instance, clean_task_state=True) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.FAILED) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self._do_build_instance_update(reschedule_update=True) self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, 
self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute.network_api.cleanup_instance_network_on_host(self.context, self.instance, self.compute.host) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.RESCHEDULED) def test_rescheduled_exception_with_non_ascii_exception(self): exc = exception.NovaException(u's\xe9quence') self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self.compute.driver.spawn(self.context, self.instance, mox.IsA(objects.ImageMeta), self.injected_files, self.admin_pass, network_info=self.network_info, block_device_info=self.block_device_info).AndRaise(exc) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.mox.ReplayAll() with mock.patch.object(self.instance, 'save') as mock_save: self.assertRaises(exception.RescheduledException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_has_calls([ mock.call(), mock.call(), mock.call(expected_task_state='block_device_mapping'), ]) @mock.patch.object(manager.ComputeManager, '_build_and_run_instance') @mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances') @mock.patch.object(network_api.API, 'cleanup_instance_network_on_host') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') @mock.patch.object(virt_driver.ComputeDriver, 'macs_for_instance') def test_rescheduled_exception_with_network_allocated(self, mock_macs_for_instance, mock_event_finish, mock_event_start, mock_ins_save, mock_cleanup_network, mock_build_ins, mock_build_and_run): instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, system_metadata={'network_allocated': 'True'}, expected_attrs=['metadata', 'system_metadata', 'info_cache']) mock_ins_save.return_value = instance mock_macs_for_instance.return_value = [] mock_build_and_run.side_effect = 
exception.RescheduledException( reason='', instance_uuid=self.instance.uuid) self.compute._do_build_and_run_instance(self.context, instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) mock_build_and_run.assert_called_once_with(self.context, instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_cleanup_network.assert_called_once_with( self.context, instance, self.compute.host) mock_build_ins.assert_called_once_with(self.context, [instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) @mock.patch.object(manager.ComputeManager, '_build_and_run_instance') @mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances') @mock.patch.object(manager.ComputeManager, '_cleanup_allocated_networks') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.InstanceActionEvent, 'event_start') @mock.patch.object(objects.InstanceActionEvent, 'event_finish_with_failure') @mock.patch.object(virt_driver.ComputeDriver, 'macs_for_instance') def test_rescheduled_exception_with_sriov_network_allocated(self, mock_macs_for_instance, mock_event_finish, mock_event_start, mock_ins_save, mock_cleanup_network, mock_build_ins, mock_build_and_run): vif1 = fake_network_cache_model.new_vif() vif1['id'] = '1' vif1['vnic_type'] = network_model.VNIC_TYPE_NORMAL vif2 = fake_network_cache_model.new_vif() vif2['id'] = '2' vif1['vnic_type'] = network_model.VNIC_TYPE_DIRECT nw_info = network_model.NetworkInfo([vif1, vif2]) instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, system_metadata={'network_allocated': 'True'}, expected_attrs=['metadata', 'system_metadata', 'info_cache']) info_cache = objects.InstanceInfoCache(network_info=nw_info, instance_uuid=instance.uuid) instance.info_cache = info_cache mock_ins_save.return_value = instance mock_macs_for_instance.return_value = [] mock_build_and_run.side_effect = exception.RescheduledException( reason='', instance_uuid=self.instance.uuid) self.compute._do_build_and_run_instance(self.context, instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) mock_build_and_run.assert_called_once_with(self.context, instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_cleanup_network.assert_called_once_with( self.context, instance, self.requested_networks) mock_build_ins.assert_called_once_with(self.context, [instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception_without_retry(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(compute_utils, 
'add_instance_fault_from_exc') self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self._do_build_instance_update() self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, {}).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) compute_utils.add_instance_fault_from_exc(self.context, self.instance, mox.IgnoreArg(), mox.IgnoreArg(), fault_message=mox.IgnoreArg()) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute._set_instance_obj_error_state(self.context, self.instance, clean_task_state=True) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties={}, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.FAILED) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception_do_not_deallocate_network(self, mock_hooks): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute.driver, 'deallocate_networks_on_reschedule') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self._do_build_instance_update(reschedule_update=True) self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute.driver.deallocate_networks_on_reschedule( self.instance).AndReturn(False) self.compute.network_api.cleanup_instance_network_on_host( self.context, self.instance, self.compute.host) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.RESCHEDULED) @mock.patch('nova.hooks._HOOKS') def test_rescheduled_exception_deallocate_network(self, mock_hooks): 
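        # Descriptive note: this is the counterpart of the test above. Here
        # the driver reports deallocate_networks_on_reschedule() as True, so
        # the manager is expected to call _cleanup_allocated_networks()
        # (tearing the allocation down) rather than merely cleaning up the
        # instance network on this host before asking conductor to
        # reschedule.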
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute.driver, 'deallocate_networks_on_reschedule') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self._do_build_instance_update(reschedule_update=True) self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance.uuid)) self.compute.driver.deallocate_networks_on_reschedule( self.instance).AndReturn(True) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.RESCHEDULED) @mock.patch('nova.hooks._HOOKS') def _test_build_and_run_exceptions(self, exc, mock_hooks, set_error=False, cleanup_volumes=False, nil_out_host_and_node=False): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self._do_build_instance_update() self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise(exc) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) if cleanup_volumes: self.compute._cleanup_volumes(self.context, self.instance.uuid, self.block_device_mapping, raise_exc=False) if nil_out_host_and_node: self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.compute._nil_out_instance_obj_host_and_node(self.instance) if set_error: self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') compute_utils.add_instance_fault_from_exc(self.context, self.instance, mox.IgnoreArg(), mox.IgnoreArg()) self.compute._set_instance_obj_error_state(self.context, self.instance, clean_task_state=True) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, 
block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) self._assert_build_instance_hook_called(mock_hooks, build_results.FAILED) def test_build_and_run_notfound_exception(self): self._test_build_and_run_exceptions(exception.InstanceNotFound( instance_id='')) def test_build_and_run_unexpecteddeleting_exception(self): self._test_build_and_run_exceptions( exception.UnexpectedDeletingTaskStateError( instance_uuid=uuids.instance, expected={}, actual={})) def test_build_and_run_buildabort_exception(self): self._test_build_and_run_exceptions( exception.BuildAbortException(instance_uuid='', reason=''), set_error=True, cleanup_volumes=True, nil_out_host_and_node=True) def test_build_and_run_unhandled_exception(self): self._test_build_and_run_exceptions(test.TestingException(), set_error=True, cleanup_volumes=True, nil_out_host_and_node=True) def test_instance_not_found(self): exc = exception.InstanceNotFound(instance_id=1) self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self.compute.driver.spawn(self.context, self.instance, mox.IsA(objects.ImageMeta), self.injected_files, self.admin_pass, network_info=self.network_info, block_device_info=self.block_device_info).AndRaise(exc) self._notify_about_instance_usage('create.end', fault=exc, stub=False) self.mox.ReplayAll() with mock.patch.object(self.instance, 'save') as mock_save: self.assertRaises(exception.InstanceNotFound, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_has_calls([ mock.call(), mock.call(), mock.call(expected_task_state='block_device_mapping'), ]) def test_reschedule_on_exception(self): self.mox.StubOutWithMock(self.compute.driver, 'spawn') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) exc = test.TestingException() self.compute.driver.spawn(self.context, self.instance, mox.IsA(objects.ImageMeta), self.injected_files, self.admin_pass, network_info=self.network_info, block_device_info=self.block_device_info).AndRaise(exc) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.mox.ReplayAll() with mock.patch.object(self.instance, 'save') as mock_save: self.assertRaises(exception.RescheduledException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, 
self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_has_calls([ mock.call(), mock.call(), mock.call(expected_task_state='block_device_mapping'), ]) def test_spawn_network_alloc_failure(self): # Because network allocation is asynchronous, failures may not present # themselves until the virt spawn method is called. self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks()) def test_build_and_run_no_more_fixedips_exception(self): self._test_build_and_run_spawn_exceptions( exception.NoMoreFixedIps("error messge")) def test_build_and_run_flavor_disk_smaller_image_exception(self): self._test_build_and_run_spawn_exceptions( exception.FlavorDiskSmallerThanImage( flavor_size=0, image_size=1)) def test_build_and_run_flavor_disk_smaller_min_disk(self): self._test_build_and_run_spawn_exceptions( exception.FlavorDiskSmallerThanMinDisk( flavor_size=0, image_min_disk=1)) def test_build_and_run_flavor_memory_too_small_exception(self): self._test_build_and_run_spawn_exceptions( exception.FlavorMemoryTooSmall()) def test_build_and_run_image_not_active_exception(self): self._test_build_and_run_spawn_exceptions( exception.ImageNotActive(image_id=self.image.get('id'))) def test_build_and_run_image_unacceptable_exception(self): self._test_build_and_run_spawn_exceptions( exception.ImageUnacceptable(image_id=self.image.get('id'), reason="")) def test_build_and_run_invalid_disk_info_exception(self): self._test_build_and_run_spawn_exceptions( exception.InvalidDiskInfo(reason="")) def _test_build_and_run_spawn_exceptions(self, exc): with test.nested( mock.patch.object(self.compute.driver, 'spawn', side_effect=exc), mock.patch.object(self.instance, 'save', side_effect=[self.instance, self.instance, self.instance]), mock.patch.object(self.compute, '_build_networks_for_instance', return_value=self.network_info), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_shutdown_instance'), mock.patch.object(self.compute, '_validate_instance_group_policy') ) as (spawn, save, _build_networks_for_instance, _notify_about_instance_usage, _shutdown_instance, _validate_instance_group_policy): self.assertRaises(exception.BuildAbortException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) _validate_instance_group_policy.assert_called_once_with( self.context, self.instance, self.filter_properties) _build_networks_for_instance.assert_has_calls( [mock.call(self.context, self.instance, self.requested_networks, self.security_groups)]) _notify_about_instance_usage.assert_has_calls([ mock.call(self.context, self.instance, 'create.start', extra_usage_info={'image_name': self.image.get('name')}), mock.call(self.context, self.instance, 'create.error', fault=exc)]) save.assert_has_calls([ mock.call(), mock.call(), mock.call( expected_task_state=task_states.BLOCK_DEVICE_MAPPING)]) spawn.assert_has_calls([mock.call(self.context, self.instance, test.MatchType(objects.ImageMeta), self.injected_files, self.admin_pass, network_info=self.network_info, block_device_info=self.block_device_info)]) _shutdown_instance.assert_called_once_with(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) def test_reschedule_on_resources_unavailable(self): reason = 'resource unavailable' exc = 
exception.ComputeResourcesUnavailable(reason=reason) class FakeResourceTracker(object): def instance_claim(self, context, instance, limits): raise exc self.mox.StubOutWithMock(self.compute, '_get_resource_tracker') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self.mox.StubOutWithMock(self.compute.network_api, 'cleanup_instance_network_on_host') self.mox.StubOutWithMock(self.compute, '_nil_out_instance_obj_host_and_node') self.compute._get_resource_tracker(self.node).AndReturn( FakeResourceTracker()) self._do_build_instance_update(reschedule_update=True) self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.compute.network_api.cleanup_instance_network_on_host( self.context, self.instance, self.compute.host) self.compute._nil_out_instance_obj_host_and_node(self.instance) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, self.security_groups, self.block_device_mapping) self._instance_action_events() self.mox.ReplayAll() self.compute.build_and_run_instance(self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, admin_password=self.admin_pass, requested_networks=self.requested_networks, security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) def test_build_resources_buildabort_reraise(self): exc = exception.BuildAbortException( instance_uuid=self.instance.uuid, reason='') self.mox.StubOutWithMock(self.compute, '_build_resources') self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, mox.IsA(objects.ImageMeta), self.block_device_mapping).AndRaise(exc) self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.mox.ReplayAll() with mock.patch.object(self.instance, 'save') as mock_save: self.assertRaises(exception.BuildAbortException, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) mock_save.assert_called_once_with() def test_build_resources_reraises_on_failed_bdm_prep(self): self.mox.StubOutWithMock(self.compute, '_prep_block_device') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self._build_resources_instance_update() self.compute._prep_block_device(self.context, self.instance, self.block_device_mapping).AndRaise(test.TestingException()) self.mox.ReplayAll() try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exception.BuildAbortException) def test_failed_bdm_prep_from_delete_raises_unexpected(self): with test.nested( mock.patch.object(self.compute, '_build_networks_for_instance', return_value=self.network_info), mock.patch.object(self.instance, 'save', 
side_effect=exception.UnexpectedDeletingTaskStateError( instance_uuid=uuids.instance, actual={'task_state': task_states.DELETING}, expected={'task_state': None})), ) as (_build_networks_for_instance, save): try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exception.UnexpectedDeletingTaskStateError) _build_networks_for_instance.assert_has_calls( [mock.call(self.context, self.instance, self.requested_networks, self.security_groups)]) save.assert_has_calls([mock.call()]) def test_build_resources_aborts_on_failed_network_alloc(self): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndRaise( test.TestingException()) self.mox.ReplayAll() try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exception.BuildAbortException) def test_failed_network_alloc_from_delete_raises_unexpected(self): with mock.patch.object(self.compute, '_build_networks_for_instance') as _build_networks: exc = exception.UnexpectedDeletingTaskStateError _build_networks.side_effect = exc( instance_uuid=uuids.instance, actual={'task_state': task_states.DELETING}, expected={'task_state': None}) try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): pass except Exception as e: self.assertIsInstance(e, exc) _build_networks.assert_has_calls( [mock.call(self.context, self.instance, self.requested_networks, self.security_groups)]) def test_build_resources_with_network_info_obj_on_spawn_failure(self): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._build_resources_instance_update() self.mox.ReplayAll() test_exception = test.TestingException() def fake_spawn(): raise test_exception try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): fake_spawn() except Exception as e: self.assertEqual(test_exception, e) def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._build_resources_instance_update() self.mox.ReplayAll() test_exception = test.TestingException() def fake_spawn(): raise test_exception try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): fake_spawn() except Exception as 
e: self.assertEqual(test_exception, e) @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait') @mock.patch( 'nova.compute.manager.ComputeManager._build_networks_for_instance') @mock.patch('nova.objects.Instance.save') def test_build_resources_instance_not_found_before_yield( self, mock_save, mock_build_network, mock_info_wait): mock_build_network.return_value = self.network_info expected_exc = exception.InstanceNotFound( instance_id=self.instance.uuid) mock_save.side_effect = expected_exc try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): raise except Exception as e: self.assertEqual(expected_exc, e) mock_build_network.assert_called_once_with(self.context, self.instance, self.requested_networks, self.security_groups) mock_info_wait.assert_called_once_with(do_raise=False) @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait') @mock.patch( 'nova.compute.manager.ComputeManager._build_networks_for_instance') @mock.patch('nova.objects.Instance.save') def test_build_resources_unexpected_task_error_before_yield( self, mock_save, mock_build_network, mock_info_wait): mock_build_network.return_value = self.network_info mock_save.side_effect = exception.UnexpectedTaskStateError( instance_uuid=uuids.instance, expected={}, actual={}) try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): raise except exception.BuildAbortException: pass mock_build_network.assert_called_once_with(self.context, self.instance, self.requested_networks, self.security_groups) mock_info_wait.assert_called_once_with(do_raise=False) @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait') @mock.patch( 'nova.compute.manager.ComputeManager._build_networks_for_instance') @mock.patch('nova.objects.Instance.save') def test_build_resources_exception_before_yield( self, mock_save, mock_build_network, mock_info_wait): mock_build_network.return_value = self.network_info mock_save.side_effect = Exception() try: with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): raise except exception.BuildAbortException: pass mock_build_network.assert_called_once_with(self.context, self.instance, self.requested_networks, self.security_groups) mock_info_wait.assert_called_once_with(do_raise=False) @mock.patch('nova.compute.manager.LOG') def test_build_resources_aborts_on_cleanup_failure(self, mock_log): self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, self.requested_networks, self.security_groups).AndReturn( self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, try_deallocate_networks=False).AndRaise( test.TestingException('Failed to shutdown')) self._build_resources_instance_update() self.mox.ReplayAll() def fake_spawn(): raise test.TestingException('Failed to spawn') with self.assertRaisesRegex(exception.BuildAbortException, 'Failed to spawn'): with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping): fake_spawn() self.assertTrue(mock_log.warning.called) msg = mock_log.warning.call_args_list[0] 
self.assertIn('Failed to shutdown', msg[0][1]) def test_build_networks_if_not_allocated(self): instance = fake_instance.fake_instance_obj(self.context, system_metadata={}, expected_attrs=['system_metadata']) self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_allocate_network') self.compute._allocate_network(self.context, instance, self.requested_networks, None, self.security_groups, None) self.mox.ReplayAll() self.compute._build_networks_for_instance(self.context, instance, self.requested_networks, self.security_groups) def test_build_networks_if_allocated_false(self): instance = fake_instance.fake_instance_obj(self.context, system_metadata=dict(network_allocated='False'), expected_attrs=['system_metadata']) self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_allocate_network') self.compute._allocate_network(self.context, instance, self.requested_networks, None, self.security_groups, None) self.mox.ReplayAll() self.compute._build_networks_for_instance(self.context, instance, self.requested_networks, self.security_groups) def test_return_networks_if_found(self): instance = fake_instance.fake_instance_obj(self.context, system_metadata=dict(network_allocated='True'), expected_attrs=['system_metadata']) def fake_network_info(): return network_model.NetworkInfo([{'address': '123.123.123.123'}]) self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') self.mox.StubOutWithMock(self.compute, '_allocate_network') self.mox.StubOutWithMock(self.compute.network_api, 'setup_instance_network_on_host') self.compute.network_api.setup_instance_network_on_host( self.context, instance, instance.host) self.compute.network_api.get_instance_nw_info( self.context, instance).AndReturn( network_model.NetworkInfoAsyncWrapper(fake_network_info)) self.mox.ReplayAll() self.compute._build_networks_for_instance(self.context, instance, self.requested_networks, self.security_groups) def test_cleanup_allocated_networks_instance_not_found(self): with test.nested( mock.patch.object(self.compute, '_deallocate_network'), mock.patch.object(self.instance, 'save', side_effect=exception.InstanceNotFound(instance_id='')) ) as (_deallocate_network, save): # Testing that this doesn't raise an exeption self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) save.assert_called_once_with() self.assertEqual('False', self.instance.system_metadata['network_allocated']) @mock.patch.object(manager.ComputeManager, '_instance_update') def test_launched_at_in_create_end_notification(self, mock_instance_update): def fake_notify(*args, **kwargs): if args[2] == 'create.end': # Check that launched_at is set on the instance self.assertIsNotNone(args[1].launched_at) with test.nested( mock.patch.object(self.compute, '_update_scheduler_instance_info'), mock.patch.object(self.compute.driver, 'spawn'), mock.patch.object(self.compute, '_build_networks_for_instance', return_value=[]), mock.patch.object(self.instance, 'save'), mock.patch.object(self.compute, '_notify_about_instance_usage', side_effect=fake_notify) ) as (mock_upd, mock_spawn, mock_networks, mock_save, mock_notify): self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) expected_call = mock.call(self.context, self.instance, 
'create.end', extra_usage_info={'message': u'Success'}, network_info=[]) create_end_call = mock_notify.call_args_list[ mock_notify.call_count - 1] self.assertEqual(expected_call, create_end_call) def test_access_ip_set_when_instance_set_to_active(self): self.flags(default_access_ip_network_name='test1') instance = fake_instance.fake_db_instance() @mock.patch.object(db, 'instance_update_and_get_original', return_value=({}, instance)) @mock.patch.object(self.compute.driver, 'spawn') @mock.patch.object(self.compute, '_build_networks_for_instance', return_value=fake_network.fake_get_instance_nw_info(self)) @mock.patch.object(db, 'instance_extra_update_by_uuid') @mock.patch.object(self.compute, '_notify_about_instance_usage') def _check_access_ip(mock_notify, mock_extra, mock_networks, mock_spawn, mock_db_update): self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) updates = {'vm_state': u'active', 'access_ip_v6': netaddr.IPAddress('2001:db8:0:1:dcad:beff:feef:1'), 'access_ip_v4': netaddr.IPAddress('192.168.1.100'), 'power_state': 0, 'task_state': None, 'launched_at': mock.ANY, 'expected_task_state': 'spawning'} expected_call = mock.call(self.context, self.instance.uuid, updates, columns_to_join=['metadata', 'system_metadata', 'info_cache']) last_update_call = mock_db_update.call_args_list[ mock_db_update.call_count - 1] self.assertEqual(expected_call, last_update_call) _check_access_ip() @mock.patch.object(manager.ComputeManager, '_instance_update') def test_create_end_on_instance_delete(self, mock_instance_update): def fake_notify(*args, **kwargs): if args[2] == 'create.end': # Check that launched_at is set on the instance self.assertIsNotNone(args[1].launched_at) exc = exception.InstanceNotFound(instance_id='') with test.nested( mock.patch.object(self.compute.driver, 'spawn'), mock.patch.object(self.compute, '_build_networks_for_instance', return_value=[]), mock.patch.object(self.instance, 'save', side_effect=[None, None, None, exc]), mock.patch.object(self.compute, '_notify_about_instance_usage', side_effect=fake_notify) ) as (mock_spawn, mock_networks, mock_save, mock_notify): self.assertRaises(exception.InstanceNotFound, self.compute._build_and_run_instance, self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, self.filter_properties) expected_call = mock.call(self.context, self.instance, 'create.end', fault=exc) create_end_call = mock_notify.call_args_list[ mock_notify.call_count - 1] self.assertEqual(expected_call, create_end_call) class ComputeManagerMigrationTestCase(test.NoDBTestCase): def setUp(self): super(ComputeManagerMigrationTestCase, self).setUp() self.compute = importutils.import_object(CONF.compute_manager) self.context = context.RequestContext('fake', 'fake') self.image = {} self.instance = fake_instance.fake_instance_obj(self.context, vm_state=vm_states.ACTIVE, expected_attrs=['metadata', 'system_metadata', 'info_cache']) self.migration = objects.Migration(context=self.context.elevated(), new_instance_type_id=7) self.migration.status = 'migrating' fake_server_actions.stub_out_action_events(self.stubs) self.useFixture(fixtures.SpawnIsSynchronousFixture()) @mock.patch.object(objects.Migration, 'save') @mock.patch.object(objects.Migration, 'obj_as_admin') def 
test_errors_out_migration_decorator(self, mock_save, mock_obj_as_admin): # Tests that errors_out_migration decorator in compute manager # sets migration status to 'error' when an exception is raised # from decorated method instance = fake_instance.fake_instance_obj(self.context) migration = objects.Migration() migration.instance_uuid = instance.uuid migration.status = 'migrating' migration.id = 0 @manager.errors_out_migration def fake_function(self, context, instance, migration): raise test.TestingException() mock_obj_as_admin.return_value = mock.MagicMock() self.assertRaises(test.TestingException, fake_function, self, self.context, instance, migration) self.assertEqual('error', migration.status) mock_save.assert_called_once_with() mock_obj_as_admin.assert_called_once_with() def test_finish_resize_failure(self): with test.nested( mock.patch.object(self.compute, '_finish_resize', side_effect=exception.ResizeError(reason='')), mock.patch.object(db, 'instance_fault_create'), mock.patch.object(self.compute, '_instance_update'), mock.patch.object(self.instance, 'save'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.migration, 'obj_as_admin', return_value=mock.MagicMock()) ) as (meth, fault_create, instance_update, instance_save, migration_save, migration_obj_as_admin): fault_create.return_value = ( test_instance_fault.fake_faults['fake-uuid'][0]) self.assertRaises( exception.ResizeError, self.compute.finish_resize, context=self.context, disk_info=[], image=self.image, instance=self.instance, reservations=[], migration=self.migration ) self.assertEqual("error", self.migration.status) migration_save.assert_called_once_with() migration_obj_as_admin.assert_called_once_with() def test_resize_instance_failure(self): self.migration.dest_host = None with test.nested( mock.patch.object(self.compute.driver, 'migrate_disk_and_power_off', side_effect=exception.ResizeError(reason='')), mock.patch.object(db, 'instance_fault_create'), mock.patch.object(self.compute, '_instance_update'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.migration, 'obj_as_admin', return_value=mock.MagicMock()), mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value=None), mock.patch.object(self.instance, 'save'), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_get_instance_block_device_info', return_value=None), mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid', return_value=None), mock.patch.object(objects.Flavor, 'get_by_id', return_value=None) ) as (meth, fault_create, instance_update, migration_save, migration_obj_as_admin, nw_info, save_inst, notify, vol_block_info, bdm, flavor): fault_create.return_value = ( test_instance_fault.fake_faults['fake-uuid'][0]) self.assertRaises( exception.ResizeError, self.compute.resize_instance, context=self.context, instance=self.instance, image=self.image, reservations=[], migration=self.migration, instance_type='type', clean_shutdown=True) self.assertEqual("error", self.migration.status) self.assertEqual([mock.call(), mock.call()], migration_save.mock_calls) self.assertEqual([mock.call(), mock.call()], migration_obj_as_admin.mock_calls) def _test_revert_resize_instance_destroy_disks(self, is_shared=False): # This test asserts that _is_instance_storage_shared() is called from # revert_resize() and the return value is passed to driver.destroy(). # Otherwise we could regress this. 
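        # Hedged sketch (assumed names, not the actual revert_resize body) of
        # the behaviour this helper locks in:
        #
        #     is_shared = self._is_instance_storage_shared(
        #         context, instance, host=migration.source_compute)
        #     self.driver.destroy(context, instance, network_info,
        #                         block_device_info,
        #                         destroy_disks=not is_shared)
        #
        # i.e. disks are destroyed only when storage is not shared with the
        # source host, matching the destroy.assert_called_once_with() check
        # below.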
@mock.patch.object(self.instance, 'revert_migration_context') @mock.patch.object(self.compute.network_api, 'get_instance_nw_info') @mock.patch.object(self.compute, '_is_instance_storage_shared') @mock.patch.object(self.compute, 'finish_revert_resize') @mock.patch.object(self.compute, '_instance_update') @mock.patch.object(self.compute, '_get_resource_tracker') @mock.patch.object(self.compute.driver, 'destroy') @mock.patch.object(self.compute.network_api, 'setup_networks_on_host') @mock.patch.object(self.compute.network_api, 'migrate_instance_start') @mock.patch.object(compute_utils, 'notify_usage_exists') @mock.patch.object(self.migration, 'save') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') def do_test(get_by_instance_uuid, migration_save, notify_usage_exists, migrate_instance_start, setup_networks_on_host, destroy, _get_resource_tracker, _instance_update, finish_revert_resize, _is_instance_storage_shared, get_instance_nw_info, revert_migration_context): self.migration.source_compute = self.instance['host'] # Inform compute that instance uses non-shared or shared storage _is_instance_storage_shared.return_value = is_shared self.compute.revert_resize(context=self.context, migration=self.migration, instance=self.instance, reservations=None) _is_instance_storage_shared.assert_called_once_with( self.context, self.instance, host=self.migration.source_compute) # If instance storage is shared, driver destroy method # should not destroy disks otherwise it should destroy disks. destroy.assert_called_once_with(self.context, self.instance, mock.ANY, mock.ANY, not is_shared) do_test() def test_revert_resize_instance_destroy_disks_shared_storage(self): self._test_revert_resize_instance_destroy_disks(is_shared=True) def test_revert_resize_instance_destroy_disks_non_shared_storage(self): self._test_revert_resize_instance_destroy_disks(is_shared=False) def test_consoles_enabled(self): self.flags(enabled=False, group='vnc') self.flags(enabled=False, group='spice') self.flags(enabled=False, group='rdp') self.flags(enabled=False, group='serial_console') self.assertFalse(self.compute._consoles_enabled()) self.flags(enabled=True, group='vnc') self.assertTrue(self.compute._consoles_enabled()) self.flags(enabled=False, group='vnc') for console in ['spice', 'rdp', 'serial_console']: self.flags(enabled=True, group=console) self.assertTrue(self.compute._consoles_enabled()) self.flags(enabled=False, group=console) @mock.patch('nova.compute.manager.ComputeManager.' 
'_do_live_migration') def _test_max_concurrent_live(self, mock_lm): @mock.patch('nova.objects.Migration.save') def _do_it(mock_mig_save): instance = objects.Instance(uuid=str(uuid.uuid4())) migration = objects.Migration() self.compute.live_migration(self.context, mock.sentinel.dest, instance, mock.sentinel.block_migration, migration, mock.sentinel.migrate_data) self.assertEqual('queued', migration.status) migration.save.assert_called_once_with() with mock.patch.object(self.compute, '_live_migration_semaphore') as mock_sem: for i in (1, 2, 3): _do_it() self.assertEqual(3, mock_sem.__enter__.call_count) def test_max_concurrent_live_limited(self): self.flags(max_concurrent_live_migrations=2) self._test_max_concurrent_live() def test_max_concurrent_live_unlimited(self): self.flags(max_concurrent_live_migrations=0) self._test_max_concurrent_live() def test_max_concurrent_live_semaphore_limited(self): self.flags(max_concurrent_live_migrations=123) self.assertEqual( 123, manager.ComputeManager()._live_migration_semaphore.balance) def test_max_concurrent_live_semaphore_unlimited(self): self.flags(max_concurrent_live_migrations=0) compute = manager.ComputeManager() self.assertEqual(0, compute._live_migration_semaphore.balance) self.assertIsInstance(compute._live_migration_semaphore, compute_utils.UnlimitedSemaphore) def test_max_concurrent_live_semaphore_negative(self): self.flags(max_concurrent_live_migrations=-2) compute = manager.ComputeManager() self.assertEqual(0, compute._live_migration_semaphore.balance) self.assertIsInstance(compute._live_migration_semaphore, compute_utils.UnlimitedSemaphore) def test_check_migrate_source_converts_object(self): # NOTE(danms): Make sure that we legacy-ify any data objects # the drivers give us back, if we were passed a non-object data = migrate_data_obj.LiveMigrateData(is_volume_backed=False) compute = manager.ComputeManager() @mock.patch.object(compute.driver, 'check_can_live_migrate_source') @mock.patch.object(compute, '_get_instance_block_device_info') @mock.patch.object(compute.compute_api, 'is_volume_backed_instance') def _test(mock_ivbi, mock_gibdi, mock_cclms): mock_cclms.return_value = data self.assertIsInstance( compute.check_can_live_migrate_source( self.context, {'uuid': uuids.instance}, {}), dict) self.assertIsInstance(mock_cclms.call_args_list[0][0][2], migrate_data_obj.LiveMigrateData) _test() def test_pre_live_migration_handles_dict(self): compute = manager.ComputeManager() @mock.patch.object(compute, '_notify_about_instance_usage') @mock.patch.object(compute, 'network_api') @mock.patch.object(compute.driver, 'pre_live_migration') @mock.patch.object(compute, '_get_instance_block_device_info') @mock.patch.object(compute.compute_api, 'is_volume_backed_instance') def _test(mock_ivbi, mock_gibdi, mock_plm, mock_nwapi, mock_notify): migrate_data = migrate_data_obj.LiveMigrateData() mock_plm.return_value = migrate_data r = compute.pre_live_migration(self.context, {'uuid': 'foo'}, False, {}, {}) self.assertIsInstance(r, dict) self.assertIsInstance(mock_plm.call_args_list[0][0][5], migrate_data_obj.LiveMigrateData) _test() def test_live_migration_handles_dict(self): compute = manager.ComputeManager() @mock.patch.object(compute, 'compute_rpcapi') @mock.patch.object(compute, 'driver') def _test(mock_driver, mock_rpc): migrate_data = migrate_data_obj.LiveMigrateData() migration = objects.Migration() migration.save = mock.MagicMock() mock_rpc.pre_live_migration.return_value = migrate_data compute._do_live_migration(self.context, 'foo', {'uuid': 'foo'}, 
False, migration, {}) self.assertIsInstance( mock_rpc.pre_live_migration.call_args_list[0][0][5], migrate_data_obj.LiveMigrateData) _test() def test_rollback_live_migration_handles_dict(self): compute = manager.ComputeManager() @mock.patch.object(compute.network_api, 'setup_networks_on_host') @mock.patch.object(compute, '_notify_about_instance_usage') @mock.patch.object(compute, '_live_migration_cleanup_flags') @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') def _test(mock_bdm, mock_lmcf, mock_notify, mock_nwapi): mock_bdm.return_value = [] mock_lmcf.return_value = False, False compute._rollback_live_migration(self.context, mock.MagicMock(), 'foo', False, {}) self.assertIsInstance(mock_lmcf.call_args_list[0][0][0], migrate_data_obj.LiveMigrateData) _test() def test_live_migration_force_complete_succeeded(self): instance = objects.Instance(uuid=str(uuid.uuid4())) migration = objects.Migration() migration.status = 'running' migration.id = 0 @mock.patch.object(compute_utils.EventReporter, '__enter__') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(objects.Migration, 'get_by_id', return_value=migration) @mock.patch.object(self.compute.driver, 'live_migration_force_complete') def _do_test(force_complete, get_by_id, _notify_about_instance_usage, enter_event_reporter): self.compute.live_migration_force_complete( self.context, instance, migration.id) force_complete.assert_called_once_with(instance) _notify_usage_calls = [ mock.call(self.context, instance, 'live.migration.force.complete.start'), mock.call(self.context, instance, 'live.migration.force.complete.end') ] _notify_about_instance_usage.assert_has_calls(_notify_usage_calls) enter_event_reporter.assert_called_once_with() _do_test() @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') def test_live_migration_pause_vm_invalid_migration_state( self, add_instance_fault_from_exc): instance = objects.Instance(id=1234, uuid=str(uuid.uuid4())) migration = objects.Migration() migration.status = 'aborted' migration.id = 0 @mock.patch.object(objects.Migration, 'get_by_id', return_value=migration) def _do_test(get_by_id): self.assertRaises(exception.InvalidMigrationState, self.compute.live_migration_force_complete, self.context, instance, migration.id) _do_test() def test_post_live_migration_at_destination_success(self): @mock.patch.object(self.instance, 'save') @mock.patch.object(self.compute.network_api, 'get_instance_nw_info', return_value='test_network') @mock.patch.object(self.compute.network_api, 'setup_networks_on_host') @mock.patch.object(self.compute.network_api, 'migrate_instance_finish') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_get_instance_block_device_info') @mock.patch.object(self.compute, '_get_power_state', return_value=1) @mock.patch.object(self.compute, '_get_compute_info') @mock.patch.object(self.compute.driver, 'post_live_migration_at_destination') def _do_test(post_live_migration_at_destination, _get_compute_info, _get_power_state, _get_instance_block_device_info, _notify_about_instance_usage, migrate_instance_finish, setup_networks_on_host, get_instance_nw_info, save): cn = mock.Mock(spec_set=['hypervisor_hostname']) cn.hypervisor_hostname = 'test_host' _get_compute_info.return_value = cn cn_old = self.instance.host instance_old = self.instance self.compute.post_live_migration_at_destination( self.context, self.instance, False) setup_networks_calls = [ mock.call(self.context, self.instance, 
self.compute.host), mock.call(self.context, self.instance, cn_old, teardown=True), mock.call(self.context, self.instance, self.compute.host) ] setup_networks_on_host.assert_has_calls(setup_networks_calls) notify_usage_calls = [ mock.call(self.context, instance_old, "live_migration.post.dest.start", network_info='test_network'), mock.call(self.context, self.instance, "live_migration.post.dest.end", network_info='test_network') ] _notify_about_instance_usage.assert_has_calls(notify_usage_calls) migrate_instance_finish.assert_called_once_with( self.context, self.instance, {'source_compute': cn_old, 'dest_compute': self.compute.host}) _get_instance_block_device_info.assert_called_once_with( self.context, self.instance ) get_instance_nw_info.assert_called_once_with(self.context, self.instance) _get_power_state.assert_called_once_with(self.context, self.instance) _get_compute_info.assert_called_once_with(self.context, self.compute.host) self.assertEqual(self.compute.host, self.instance.host) self.assertEqual('test_host', self.instance.node) self.assertEqual(1, self.instance.power_state) self.assertEqual(0, self.instance.progress) self.assertIsNone(self.instance.task_state) save.assert_called_once_with( expected_task_state=task_states.MIGRATING) _do_test() def test_post_live_migration_at_destination_compute_not_found(self): @mock.patch.object(self.instance, 'save') @mock.patch.object(self.compute, 'network_api') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_get_instance_block_device_info') @mock.patch.object(self.compute, '_get_power_state', return_value=1) @mock.patch.object(self.compute, '_get_compute_info', side_effect=exception.ComputeHostNotFound( host='fake')) @mock.patch.object(self.compute.driver, 'post_live_migration_at_destination') def _do_test(post_live_migration_at_destination, _get_compute_info, _get_power_state, _get_instance_block_device_info, _notify_about_instance_usage, network_api, save): cn = mock.Mock(spec_set=['hypervisor_hostname']) cn.hypervisor_hostname = 'test_host' _get_compute_info.return_value = cn self.compute.post_live_migration_at_destination( self.context, self.instance, False) self.assertIsNone(self.instance.node) _do_test() def test_post_live_migration_at_destination_unexpected_exception(self): @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(self.instance, 'save') @mock.patch.object(self.compute, 'network_api') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, '_get_instance_block_device_info') @mock.patch.object(self.compute, '_get_power_state', return_value=1) @mock.patch.object(self.compute, '_get_compute_info') @mock.patch.object(self.compute.driver, 'post_live_migration_at_destination', side_effect=exception.NovaException) def _do_test(post_live_migration_at_destination, _get_compute_info, _get_power_state, _get_instance_block_device_info, _notify_about_instance_usage, network_api, save, add_instance_fault_from_exc): cn = mock.Mock(spec_set=['hypervisor_hostname']) cn.hypervisor_hostname = 'test_host' _get_compute_info.return_value = cn self.assertRaises(exception.NovaException, self.compute.post_live_migration_at_destination, self.context, self.instance, False) self.assertEqual(vm_states.ERROR, self.instance.vm_state) _do_test() def _get_migration(self, migration_id, status, migration_type): migration = objects.Migration() migration.id = migration_id migration.status = status migration.migration_type = migration_type 
return migration @mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage') @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'live_migration_abort') def test_live_migration_abort(self, mock_driver, mock_get_migration, mock_notify): instance = objects.Instance(id=123, uuid=uuids.instance) migration = self._get_migration(10, 'running', 'live-migration') mock_get_migration.return_value = migration self.compute.live_migration_abort(self.context, instance, migration.id) mock_driver.assert_called_with(instance) _notify_usage_calls = [mock.call(self.context, instance, 'live.migration.abort.start'), mock.call(self.context, instance, 'live.migration.abort.end')] mock_notify.assert_has_calls(_notify_usage_calls) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage') @mock.patch.object(objects.Migration, 'get_by_id') @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'live_migration_abort') def test_live_migration_abort_not_supported(self, mock_driver, mock_get_migration, mock_notify, mock_instance_fault): instance = objects.Instance(id=123, uuid=uuids.instance) migration = self._get_migration(10, 'running', 'live-migration') mock_get_migration.return_value = migration mock_driver.side_effect = NotImplementedError() self.assertRaises(NotImplementedError, self.compute.live_migration_abort, self.context, instance, migration.id) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(objects.Migration, 'get_by_id') def test_live_migration_abort_wrong_migration_state(self, mock_get_migration, mock_instance_fault): instance = objects.Instance(id=123, uuid=uuids.instance) migration = self._get_migration(10, 'completed', 'live-migration') mock_get_migration.return_value = migration self.assertRaises(exception.InvalidMigrationState, self.compute.live_migration_abort, self.context, instance, migration.id) def test_live_migration_cleanup_flags_block_migrate_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=False, is_shared_instance_path=False) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertTrue(do_cleanup) self.assertTrue(destroy_disks) def test_live_migration_cleanup_flags_shared_block_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=True, is_shared_instance_path=False) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertTrue(do_cleanup) self.assertFalse(destroy_disks) def test_live_migration_cleanup_flags_shared_path_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=False, is_shared_instance_path=True) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertFalse(do_cleanup) self.assertTrue(destroy_disks) def test_live_migration_cleanup_flags_shared_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=True, is_shared_instance_path=True) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertFalse(do_cleanup) self.assertFalse(destroy_disks) def test_live_migration_cleanup_flags_block_migrate_xenapi(self): migrate_data = objects.XenapiLiveMigrateData(block_migration=True) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertTrue(do_cleanup) self.assertTrue(destroy_disks) def 
test_live_migration_cleanup_flags_live_migrate_xenapi(self): migrate_data = objects.XenapiLiveMigrateData(block_migration=False) do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( migrate_data) self.assertFalse(do_cleanup) self.assertFalse(destroy_disks) def test_live_migration_cleanup_flags_live_migrate(self): do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( {}) self.assertFalse(do_cleanup) self.assertFalse(destroy_disks) class ComputeManagerInstanceUsageAuditTestCase(test.TestCase): def setUp(self): super(ComputeManagerInstanceUsageAuditTestCase, self).setUp() self.flags(use_local=True, group='conductor') self.flags(instance_usage_audit=True) @mock.patch('nova.objects.TaskLog') def test_deleted_instance(self, mock_task_log): mock_task_log.get.return_value = None compute = importutils.import_object(CONF.compute_manager) admin_context = context.get_admin_context() fake_db_flavor = fake_flavor.fake_db_flavor() flavor = objects.Flavor(admin_context, **fake_db_flavor) updates = {'host': compute.host, 'flavor': flavor, 'root_gb': 0, 'ephemeral_gb': 0} # fudge beginning and ending time by a second (backwards and forwards, # respectively) so they differ from the instance's launch and # termination times when sub-seconds are truncated and fall within the # audit period one_second = datetime.timedelta(seconds=1) begin = timeutils.utcnow() - one_second instance = objects.Instance(admin_context, **updates) instance.create() instance.launched_at = timeutils.utcnow() instance.save() instance.destroy() end = timeutils.utcnow() + one_second def fake_last_completed_audit_period(): return (begin, end) self.stub_out('nova.utils.last_completed_audit_period', fake_last_completed_audit_period) compute._instance_usage_audit(admin_context) self.assertEqual(1, mock_task_log().task_items, 'the deleted test instance was not found in the audit' ' period') self.assertEqual(0, mock_task_log().errors, 'an error was encountered processing the deleted test' ' instance') nova-13.1.4/nova/tests/unit/cert/0000775000567000056710000000000013064447471017737 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/cert/__init__.py0000664000567000056710000000000013064447140022027 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/cert/test_rpcapi.py0000664000567000056710000000555613064447152022635 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for nova.cert.rpcapi """ import mock from oslo_config import cfg from nova.cert import rpcapi as cert_rpcapi from nova import context from nova import test CONF = cfg.CONF class CertRpcAPITestCase(test.NoDBTestCase): def _test_cert_api(self, method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = cert_rpcapi.CertAPI() self.assertIsNotNone(rpcapi.client) self.assertEqual(CONF.cert_topic, rpcapi.client.target.topic) orig_prepare = rpcapi.client.prepare with test.nested( mock.patch.object(rpcapi.client, 'call'), mock.patch.object(rpcapi.client, 'prepare'), mock.patch.object(rpcapi.client, 'can_send_version'), ) as ( rpc_mock, prepare_mock, csv_mock ): prepare_mock.return_value = rpcapi.client rpc_mock.return_value = 'foo' csv_mock.side_effect = ( lambda v: orig_prepare().can_send_version()) retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(rpc_mock.return_value, retval) prepare_mock.assert_called_once_with() rpc_mock.assert_called_once_with(ctxt, method, **kwargs) def test_revoke_certs_by_user(self): self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id') def test_revoke_certs_by_project(self): self._test_cert_api('revoke_certs_by_project', project_id='fake_project_id') def test_revoke_certs_by_user_and_project(self): self._test_cert_api('revoke_certs_by_user_and_project', user_id='fake_user_id', project_id='fake_project_id') def test_generate_x509_cert(self): self._test_cert_api('generate_x509_cert', user_id='fake_user_id', project_id='fake_project_id') def test_fetch_ca(self): self._test_cert_api('fetch_ca', project_id='fake_project_id') def test_fetch_crl(self): self._test_cert_api('fetch_crl', project_id='fake_project_id') def test_decrypt_text(self): self._test_cert_api('decrypt_text', project_id='fake_project_id', text='blah') nova-13.1.4/nova/tests/unit/test_flavors.py0000664000567000056710000005075213064447152022074 0ustar jenkinsjenkins00000000000000# Copyright 2011 Ken Pepple # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for flavors code """ from nova.compute import flavors from nova import context from nova import db from nova import exception from nova import objects from nova.objects import base as obj_base from nova import test DEFAULT_FLAVORS = [ {'memory_mb': 512, 'root_gb': 1, 'deleted_at': None, 'name': 'm1.tiny', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '1', 'vcpu_weight': None, 'id': 2}, {'memory_mb': 2048, 'root_gb': 20, 'deleted_at': None, 'name': 'm1.small', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '2', 'vcpu_weight': None, 'id': 5}, {'memory_mb': 4096, 'root_gb': 40, 'deleted_at': None, 'name': 'm1.medium', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 2, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '3', 'vcpu_weight': None, 'id': 1}, {'memory_mb': 8192, 'root_gb': 80, 'deleted_at': None, 'name': 'm1.large', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 4, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '4', 'vcpu_weight': None, 'id': 3}, {'memory_mb': 16384, 'root_gb': 160, 'deleted_at': None, 'name': 'm1.xlarge', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 8, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '5', 'vcpu_weight': None, 'id': 4} ] CONTEXT = context.RequestContext('fake', 'fake', is_admin=False) DEFAULT_FLAVOR_OBJS = [ objects.Flavor._obj_from_primitive(CONTEXT, objects.Flavor.VERSION, {'nova_object.data': flavor}) for flavor in DEFAULT_FLAVORS ] class InstanceTypeTestCase(test.TestCase): """Test cases for flavor code.""" def test_non_existent_inst_type_should_not_delete(self): # Ensures that flavor creation fails with invalid args. self.assertRaises(exception.FlavorNotFoundByName, flavors.destroy, 'unknown_flavor') def test_will_not_destroy_with_no_name(self): # Ensure destroy said path of no name raises error. self.assertRaises(exception.FlavorNotFoundByName, flavors.destroy, None) def test_will_not_get_bad_default_instance_type(self): # ensures error raised on bad default flavor. self.flags(default_flavor='unknown_flavor') self.assertRaises(exception.FlavorNotFound, flavors.get_default_flavor) def test_flavor_get_by_None_name_returns_default(self): # Ensure get by name returns default flavor with no name. default = flavors.get_default_flavor() actual = flavors.get_flavor_by_name(None) self.assertIsInstance(default, objects.Flavor) self.assertIsInstance(actual, objects.Flavor) self.assertEqual(default.flavorid, actual.flavorid) def test_will_not_get_flavor_with_bad_name(self): # Ensure get by name returns default flavor with bad name. self.assertRaises(exception.FlavorNotFound, flavors.get_flavor_by_name, 10000) def test_will_not_get_instance_by_unknown_flavor_id(self): # Ensure get by flavor raises error with wrong flavorid. 
self.assertRaises(exception.FlavorNotFound, flavors.get_flavor_by_flavor_id, 'unknown_flavor') def test_will_get_instance_by_flavor_id(self): default_instance_type = flavors.get_default_flavor() flavorid = default_instance_type.flavorid fetched = flavors.get_flavor_by_flavor_id(flavorid) self.assertIsInstance(fetched, objects.Flavor) self.assertEqual(default_instance_type.flavorid, fetched.flavorid) def test_can_read_deleted_types_using_flavor_id(self): # Ensure deleted flavors can be read when querying flavor_id. inst_type_name = "test" inst_type_flavor_id = "test1" inst_type = flavors.create(inst_type_name, 256, 1, 120, 100, inst_type_flavor_id) self.assertEqual(inst_type_name, inst_type.name) # NOTE(jk0): The deleted flavor will show up here because the context # in get_flavor_by_flavor_id() is set to use read_deleted by # default. flavors.destroy(inst_type.name) deleted_inst_type = flavors.get_flavor_by_flavor_id( inst_type_flavor_id) self.assertEqual(inst_type_name, deleted_inst_type.name) def test_read_deleted_false_converting_flavorid(self): """Ensure deleted flavors are not returned when not needed (for example when creating a server and attempting to translate from flavorid to instance_type_id. """ flavors.create("instance_type1", 256, 1, 120, 100, "test1") flavors.destroy("instance_type1") flavors.create("instance_type1_redo", 256, 1, 120, 100, "test1") instance_type = flavors.get_flavor_by_flavor_id( "test1", read_deleted="no") self.assertEqual("instance_type1_redo", instance_type.name) def test_get_all_flavors_sorted_list_sort(self): # Test default sort all_flavors = flavors.get_all_flavors_sorted_list() self.assertEqual(len(DEFAULT_FLAVORS), len(all_flavors)) for i in range(len(all_flavors)): f = all_flavors[i] self.assertIsInstance(f, objects.Flavor) self.assertEqual(DEFAULT_FLAVORS[i]['flavorid'], f.flavorid) # Test sorted by name all_flavors = flavors.get_all_flavors_sorted_list(sort_key='name') expected = sorted(DEFAULT_FLAVORS, key=lambda item: item['name']) self.assertEqual(len(expected), len(all_flavors)) for i in range(len(all_flavors)): f = all_flavors[i] self.assertIsInstance(f, objects.Flavor) self.assertEqual(expected[i]['flavorid'], f.flavorid) def test_get_all_flavors_sorted_list_limit(self): limited_flavors = flavors.get_all_flavors_sorted_list(limit=2) self.assertEqual(2, len(limited_flavors)) def test_get_all_flavors_sorted_list_marker(self): all_flavors = flavors.get_all_flavors_sorted_list() # Set the 3rd result as the marker marker_flavorid = all_flavors[2].flavorid marked_flavors = flavors.get_all_flavors_sorted_list( marker=marker_flavorid) # We expect everything /after/ the 3rd result expected_results = all_flavors[3:] self.assertEqual(len(expected_results), len(marked_flavors)) for i in range(len(marked_flavors)): f = marked_flavors[i] self.assertIsInstance(f, objects.Flavor) self.assertEqual(expected_results[i].flavorid, f.flavorid) class InstanceTypeToolsTest(test.TestCase): def _dict_to_metadata(self, data): return [{'key': key, 'value': value} for key, value in data.items()] def _test_extract_flavor(self, prefix): instance_type = flavors.get_default_flavor() instance_type_p = obj_base.obj_to_primitive(instance_type) metadata = {} flavors.save_flavor_info(metadata, instance_type, prefix) instance = {'system_metadata': self._dict_to_metadata(metadata)} _instance_type = flavors.extract_flavor(instance, prefix) _instance_type_p = obj_base.obj_to_primitive(_instance_type) props = flavors.system_metadata_flavor_props.keys() for key in 
list(instance_type_p.keys()): if key not in props: del instance_type_p[key] self.assertEqual(instance_type_p, _instance_type_p) def test_extract_flavor(self): self._test_extract_flavor('') def test_extract_flavor_no_sysmeta(self): instance = {} prefix = '' result = flavors.extract_flavor(instance, prefix) self.assertIsNone(result) def test_extract_flavor_prefix(self): self._test_extract_flavor('foo_') def test_save_flavor_info(self): instance_type = flavors.get_default_flavor() example = {} example_prefix = {} for key in flavors.system_metadata_flavor_props.keys(): example['instance_type_%s' % key] = instance_type[key] example_prefix['fooinstance_type_%s' % key] = instance_type[key] metadata = {} flavors.save_flavor_info(metadata, instance_type) self.assertEqual(example, metadata) metadata = {} flavors.save_flavor_info(metadata, instance_type, 'foo') self.assertEqual(example_prefix, metadata) def test_delete_flavor_info(self): instance_type = flavors.get_default_flavor() metadata = {} flavors.save_flavor_info(metadata, instance_type) flavors.save_flavor_info(metadata, instance_type, '_') flavors.delete_flavor_info(metadata, '', '_') self.assertEqual(metadata, {}) def test_flavor_numa_extras_are_saved(self): instance_type = flavors.get_default_flavor() instance_type['extra_specs'] = { 'hw:numa_mem.0': '123', 'hw:numa_cpus.0': '456', 'hw:numa_mem.1': '789', 'hw:numa_cpus.1': 'ABC', 'foo': 'bar', } sysmeta = flavors.save_flavor_info({}, instance_type) _instance_type = flavors.extract_flavor({'system_metadata': sysmeta}) expected_extra_specs = { 'hw:numa_mem.0': '123', 'hw:numa_cpus.0': '456', 'hw:numa_mem.1': '789', 'hw:numa_cpus.1': 'ABC', } self.assertEqual(expected_extra_specs, _instance_type['extra_specs']) flavors.delete_flavor_info(sysmeta, '') self.assertEqual({}, sysmeta) class InstanceTypeFilteringTest(test.TestCase): """Test cases for the filter option available for instance_type_get_all.""" def setUp(self): super(InstanceTypeFilteringTest, self).setUp() self.context = context.get_admin_context() def assertFilterResults(self, filters, expected): inst_types = db.flavor_get_all( self.context, filters=filters) inst_names = [i['name'] for i in inst_types] self.assertEqual(inst_names, expected) def test_no_filters(self): filters = None expected = ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'] self.assertFilterResults(filters, expected) def test_min_memory_mb_filter(self): # Exclude tiny instance which is 512 MB. filters = dict(min_memory_mb=513) expected = ['m1.small', 'm1.medium', 'm1.large', 'm1.xlarge'] self.assertFilterResults(filters, expected) def test_min_root_gb_filter(self): # Exclude everything but large and xlarge which have >= 80 GB. filters = dict(min_root_gb=80) expected = ['m1.large', 'm1.xlarge'] self.assertFilterResults(filters, expected) def test_min_memory_mb_AND_root_gb_filter(self): # Exclude everything but large and xlarge which have >= 80 GB. filters = dict(min_memory_mb=16384, min_root_gb=80) expected = ['m1.xlarge'] self.assertFilterResults(filters, expected) class CreateInstanceTypeTest(test.TestCase): def assertInvalidInput(self, *create_args, **create_kwargs): self.assertRaises(exception.InvalidInput, flavors.create, *create_args, **create_kwargs) def test_create_with_valid_name(self): # Names can contain alphanumeric and [_.- ] flavors.create('azAZ09. 
-_', 64, 1, 120) # And they are not limited to ascii characters # E.g.: m1.huge in simplified Chinese flavors.create(u'm1.\u5DE8\u5927', 6400, 100, 12000) def test_name_with_special_characters(self): # Names can contain all printable characters flavors.create('_foo.bar-123', 64, 1, 120) # Ensure instance type names raise InvalidInput for invalid characters. self.assertInvalidInput('foobar\x00', 64, 1, 120) def test_name_with_non_printable_characters(self): # Names cannot contain non-printable characters self.assertInvalidInput(u'm1.\u0868 #', 64, 1, 120) def test_name_length_checks(self): MAX_LEN = 255 # Flavor name with 255 characters or less is valid. flavors.create('a' * MAX_LEN, 64, 1, 120) # Flavor name which is more than 255 characters will cause error. self.assertInvalidInput('a' * (MAX_LEN + 1), 64, 1, 120) # Flavor name which is empty should cause an error self.assertInvalidInput('', 64, 1, 120) def test_all_whitespace_flavor_names_rejected(self): self.assertInvalidInput(' ', 64, 1, 120) def test_flavorid_with_invalid_characters(self): # Ensure Flavor ID can only contain [a-zA-Z0-9_.- ] self.assertInvalidInput('a', 64, 1, 120, flavorid=u'\u2605') self.assertInvalidInput('a', 64, 1, 120, flavorid='%%$%$@#$#@$@#$^%') def test_flavorid_length_checks(self): MAX_LEN = 255 # Flavor ID which is more than 255 characters will cause error. self.assertInvalidInput('a', 64, 1, 120, flavorid='a' * (MAX_LEN + 1)) def test_memory_must_be_positive_db_integer(self): self.assertInvalidInput('flavor1', 'foo', 1, 120) self.assertInvalidInput('flavor1', -1, 1, 120) self.assertInvalidInput('flavor1', 0, 1, 120) self.assertInvalidInput('flavor1', db.MAX_INT + 1, 1, 120) flavors.create('flavor1', 1, 1, 120) def test_vcpus_must_be_positive_db_integer(self): self.assertInvalidInput('flavor`', 64, 'foo', 120) self.assertInvalidInput('flavor1', 64, -1, 120) self.assertInvalidInput('flavor1', 64, 0, 120) self.assertInvalidInput('flavor1', 64, db.MAX_INT + 1, 120) flavors.create('flavor1', 64, 1, 120) def test_root_gb_must_be_nonnegative_db_integer(self): self.assertInvalidInput('flavor1', 64, 1, 'foo') self.assertInvalidInput('flavor1', 64, 1, -1) self.assertInvalidInput('flavor1', 64, 1, db.MAX_INT + 1) flavors.create('flavor1', 64, 1, 0) flavors.create('flavor2', 64, 1, 120) def test_ephemeral_gb_must_be_nonnegative_db_integer(self): self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb='foo') self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=-1) self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=db.MAX_INT + 1) flavors.create('flavor1', 64, 1, 120, ephemeral_gb=0) flavors.create('flavor2', 64, 1, 120, ephemeral_gb=120) def test_swap_must_be_nonnegative_db_integer(self): self.assertInvalidInput('flavor1', 64, 1, 120, swap='foo') self.assertInvalidInput('flavor1', 64, 1, 120, swap=-1) self.assertInvalidInput('flavor1', 64, 1, 120, swap=db.MAX_INT + 1) flavors.create('flavor1', 64, 1, 120, swap=0) flavors.create('flavor2', 64, 1, 120, swap=1) def test_rxtx_factor_must_be_positive_float(self): self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor='foo') self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=-1.0) self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=0.0) flavor = flavors.create('flavor1', 64, 1, 120, rxtx_factor=1.0) self.assertEqual(1.0, flavor.rxtx_factor) flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=1.1) self.assertEqual(1.1, flavor.rxtx_factor) def test_rxtx_factor_must_be_within_sql_float_range(self): _context = context.get_admin_context() 
db.flavor_get_all(_context) # We do * 10 since this is an approximation and we need to make sure # the difference is noticeable. over_rxtx_factor = flavors.SQL_SP_FLOAT_MAX * 10 self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=over_rxtx_factor) flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=flavors.SQL_SP_FLOAT_MAX) self.assertEqual(flavors.SQL_SP_FLOAT_MAX, flavor.rxtx_factor) def test_is_public_must_be_valid_bool_string(self): self.assertInvalidInput('flavor1', 64, 1, 120, is_public='foo') flavors.create('flavor1', 64, 1, 120, is_public='TRUE') flavors.create('flavor2', 64, 1, 120, is_public='False') flavors.create('flavor3', 64, 1, 120, is_public='Yes') flavors.create('flavor4', 64, 1, 120, is_public='No') flavors.create('flavor5', 64, 1, 120, is_public='Y') flavors.create('flavor6', 64, 1, 120, is_public='N') flavors.create('flavor7', 64, 1, 120, is_public='1') flavors.create('flavor8', 64, 1, 120, is_public='0') flavors.create('flavor9', 64, 1, 120, is_public='true') def test_flavorid_populated(self): flavor1 = flavors.create('flavor1', 64, 1, 120) self.assertIsNot(None, flavor1.flavorid) flavor2 = flavors.create('flavor2', 64, 1, 120, flavorid='') self.assertIsNot(None, flavor2.flavorid) flavor3 = flavors.create('flavor3', 64, 1, 120, flavorid='foo') self.assertEqual('foo', flavor3.flavorid) def test_default_values(self): flavor1 = flavors.create('flavor1', 64, 1, 120) self.assertIsNot(None, flavor1.flavorid) self.assertEqual(flavor1.ephemeral_gb, 0) self.assertEqual(flavor1.swap, 0) self.assertEqual(flavor1.rxtx_factor, 1.0) def test_basic_create(self): # Ensure instance types can be created. ctxt = context.get_admin_context() original_list = objects.FlavorList.get_all(ctxt) # Create new type and make sure values stick flavor = flavors.create('flavor', 64, 1, 120) self.assertEqual(flavor.name, 'flavor') self.assertEqual(flavor.memory_mb, 64) self.assertEqual(flavor.vcpus, 1) self.assertEqual(flavor.root_gb, 120) # Ensure new type shows up in list new_list = objects.FlavorList.get_all(ctxt) self.assertNotEqual(len(original_list), len(new_list), 'flavor was not created') def test_create_then_delete(self): ctxt = context.get_admin_context() original_list = objects.FlavorList.get_all(ctxt) flavor = flavors.create('flavor', 64, 1, 120) # Ensure new type shows up in list new_list = objects.FlavorList.get_all(ctxt) self.assertNotEqual(len(original_list), len(new_list), 'instance type was not created') flavors.destroy('flavor') self.assertRaises(exception.FlavorNotFound, objects.Flavor.get_by_id, ctxt, flavor.id) # Deleted instance should not be in list anymore new_list = objects.FlavorList.get_all(ctxt) self.assertEqual(len(original_list), len(new_list)) for i, f in enumerate(original_list): self.assertIsInstance(f, objects.Flavor) self.assertEqual(f.flavorid, new_list[i].flavorid) def test_duplicate_names_fail(self): # Ensures that name duplicates raise FlavorExists. flavors.create('flavor', 256, 1, 120, 200, 'flavor1') self.assertRaises(exception.FlavorExists, flavors.create, 'flavor', 64, 1, 120) def test_duplicate_flavorids_fail(self): # Ensures that flavorid duplicates raise FlavorIdExists. 
flavors.create('flavor1', 64, 1, 120, flavorid='flavorid') self.assertRaises(exception.FlavorIdExists, flavors.create, 'flavor2', 64, 1, 120, flavorid='flavorid') nova-13.1.4/nova/tests/unit/keymgr/0000775000567000056710000000000013064447471020300 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/keymgr/test_single_key_mgr.py0000664000567000056710000000462013064447152024705 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the single key manager. """ import array from nova import exception from nova.keymgr import key from nova.keymgr import single_key_mgr from nova.tests.unit.keymgr import test_mock_key_mgr class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase): def _create_key_manager(self): return single_key_mgr.SingleKeyManager() def setUp(self): super(SingleKeyManagerTestCase, self).setUp() self.key_id = '00000000-0000-0000-0000-000000000000' encoded = array.array('B', ('0' * 64).decode('hex')).tolist() self.key = key.SymmetricKey('AES', encoded) def test___init__(self): self.assertEqual(self.key, self.key_mgr.get_key(self.ctxt, self.key_id)) def test_create_key(self): key_id_1 = self.key_mgr.create_key(self.ctxt) key_id_2 = self.key_mgr.create_key(self.ctxt) # ensure that the UUIDs are the same self.assertEqual(key_id_1, key_id_2) def test_create_key_with_length(self): pass def test_store_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.store_key, None, self.key) def test_copy_key(self): key_id = self.key_mgr.create_key(self.ctxt) key = self.key_mgr.get_key(self.ctxt, key_id) copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) self.assertEqual(key_id, copied_key_id) self.assertEqual(key, copied_key) def test_delete_key(self): pass def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.delete_key, self.ctxt, None) nova-13.1.4/nova/tests/unit/keymgr/test_mock_key_mgr.py0000664000567000056710000000703713064447152024362 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the mock key manager. 
""" import array from nova import context from nova import exception from nova.keymgr import key as keymgr_key from nova.keymgr import mock_key_mgr from nova.tests.unit.keymgr import test_key_mgr class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return mock_key_mgr.MockKeyManager() def setUp(self): super(MockKeyManagerTestCase, self).setUp() self.ctxt = context.RequestContext('fake', 'fake') def test_create_key(self): key_id_1 = self.key_mgr.create_key(self.ctxt) key_id_2 = self.key_mgr.create_key(self.ctxt) # ensure that the UUIDs are unique self.assertNotEqual(key_id_1, key_id_2) def test_create_key_with_length(self): for length in [64, 128, 256]: key_id = self.key_mgr.create_key(self.ctxt, key_length=length) key = self.key_mgr.get_key(self.ctxt, key_id) self.assertEqual(length / 8, len(key.get_encoded())) def test_create_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.create_key, None) def test_store_key(self): secret_key = array.array('B', ('0' * 64).decode('hex')).tolist() _key = keymgr_key.SymmetricKey('AES', secret_key) key_id = self.key_mgr.store_key(self.ctxt, _key) actual_key = self.key_mgr.get_key(self.ctxt, key_id) self.assertEqual(_key, actual_key) def test_store_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.store_key, None, None) def test_copy_key(self): key_id = self.key_mgr.create_key(self.ctxt) key = self.key_mgr.get_key(self.ctxt, key_id) copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) self.assertNotEqual(key_id, copied_key_id) self.assertEqual(key, copied_key) def test_copy_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.copy_key, None, None) def test_get_key(self): pass def test_get_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.get_key, None, None) def test_get_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) def test_delete_key(self): key_id = self.key_mgr.create_key(self.ctxt) self.key_mgr.delete_key(self.ctxt, key_id) self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id) def test_delete_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr.delete_key, None, None) def test_delete_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None) nova-13.1.4/nova/tests/unit/keymgr/test_key_mgr.py0000664000567000056710000000172213064447152023344 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the key manager. 
""" from nova import test class KeyManagerTestCase(test.NoDBTestCase): def _create_key_manager(self): raise NotImplementedError() def setUp(self): super(KeyManagerTestCase, self).setUp() self.key_mgr = self._create_key_manager() nova-13.1.4/nova/tests/unit/keymgr/__init__.py0000664000567000056710000000000013064447140022370 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/keymgr/test_barbican.py0000664000567000056710000002457513064447152023463 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the barbican key manager. """ import array import binascii import mock from nova import exception from nova.keymgr import barbican from nova.keymgr import key as keymgr_key from nova.tests.unit.keymgr import test_key_mgr class BarbicanKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return barbican.BarbicanKeyManager() def setUp(self): super(BarbicanKeyManagerTestCase, self).setUp() # Create fake auth_token self.ctxt = mock.MagicMock() self.ctxt.auth_token = "fake_token" self.ctxt.project = "fake_project" # Create mock barbican client self._build_mock_barbican() # Create a key_id, secret_ref, pre_hex, and hex to use self.key_id = "d152fa13-2b41-42ca-a934-6c21566c0f40" self.secret_ref = ("http://host:9311/v1/secrets/" + self.key_id) self.pre_hex = "AIDxQp2++uAbKaTVDMXFYIu8PIugJGqkK0JLqkU0rhY=" self.hex = ("0080f1429dbefae01b29a4d50cc5c5608bbc3c8ba0246aa42b424baa4" "534ae16") self.key_mgr._current_context = self.ctxt self.key_mgr._base_url = "http://host:9311/v1" self.addCleanup(self._restore) def _restore(self): if hasattr(self, 'original_key'): keymgr_key.SymmetricKey = self.original_key def _build_mock_barbican(self): self.mock_barbican = mock.MagicMock(name='mock_barbican') # Set commonly used methods self.get = self.mock_barbican.secrets.get self.delete = self.mock_barbican.secrets.delete self.store = self.mock_barbican.secrets.store self.create = self.mock_barbican.secrets.create self.key_mgr._barbican_client = self.mock_barbican def _build_mock_symKey(self): self.mock_symKey = mock.Mock() def fake_sym_key(alg, key): self.mock_symKey.get_encoded.return_value = key self.mock_symKey.get_algorithm.return_value = alg return self.mock_symKey self.original_key = keymgr_key.SymmetricKey keymgr_key.SymmetricKey = fake_sym_key def test_copy_key(self): # Create metadata for original secret original_secret_metadata = mock.Mock() original_secret_metadata.algorithm = mock.sentinel.alg original_secret_metadata.bit_length = mock.sentinel.bit original_secret_metadata.name = mock.sentinel.name original_secret_metadata.expiration = mock.sentinel.expiration original_secret_metadata.mode = mock.sentinel.mode content_types = {'default': 'fake_type'} original_secret_metadata.content_types = content_types original_secret_data = mock.Mock() original_secret_metadata.payload = original_secret_data # Create href for copied secret copied_secret 
= mock.Mock() copied_secret.store.return_value = 'http://test/uuid' # Set get and create return values self.get.return_value = original_secret_metadata self.create.return_value = copied_secret # Create the mock key self._build_mock_symKey() # Copy the original self.key_mgr.copy_key(self.ctxt, self.key_id) # Assert proper methods were called self.get.assert_called_once_with(self.secret_ref) self.create.assert_called_once_with( mock.sentinel.name, self.mock_symKey.get_encoded(), content_types['default'], 'base64', mock.sentinel.alg, mock.sentinel.bit, mock.sentinel.mode, mock.sentinel.expiration) copied_secret.store.assert_called_once_with() def test_copy_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.copy_key, None, self.key_id) def test_create_key(self): # Create order_ref_url and assign return value order_ref_url = ("http://localhost:9311/v1/None/orders/" "4fe939b7-72bc-49aa-bd1e-e979589858af") key_order = mock.Mock() self.mock_barbican.orders.create_key.return_value = key_order key_order.submit.return_value = order_ref_url # Create order and assign return value order = mock.Mock() order.secret_ref = self.secret_ref self.mock_barbican.orders.get.return_value = order # Create the key, get the UUID returned_uuid = self.key_mgr.create_key(self.ctxt) self.mock_barbican.orders.get.assert_called_once_with(order_ref_url) self.assertEqual(returned_uuid, self.key_id) def test_create_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.create_key, None) def test_delete_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.delete_key, None, self.key_id) def test_delete_key(self): self.key_mgr.delete_key(self.ctxt, self.key_id) self.delete.assert_called_once_with(self.secret_ref) def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.delete_key, self.ctxt, None) @mock.patch('base64.b64encode') def test_get_key(self, b64_mock): b64_mock.return_value = self.pre_hex content_type = 'application/octet-stream' key = self.key_mgr.get_key(self.ctxt, self.key_id, content_type) self.get.assert_called_once_with(self.secret_ref) encoded = array.array('B', binascii.unhexlify(self.hex)).tolist() self.assertEqual(key.get_encoded(), encoded) def test_get_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.get_key, None, self.key_id) def test_get_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.get_key, self.ctxt, None) def test_store_key_base64(self): # Create Key to store secret_key = array.array('B', [0x01, 0x02, 0xA0, 0xB3]).tolist() _key = keymgr_key.SymmetricKey('AES', secret_key) # Define the return values secret = mock.Mock() self.create.return_value = secret secret.store.return_value = self.secret_ref # Store the Key returned_uuid = self.key_mgr.store_key(self.ctxt, _key, bit_length=32) self.create.assert_called_once_with('Nova Compute Key', 'AQKgsw==', 'application/octet-stream', 'base64', 'AES', 32, 'CBC', None) self.assertEqual(returned_uuid, self.key_id) def test_store_key_plaintext(self): # Create the plaintext key secret_key_text = "This is a test text key." 
_key = keymgr_key.SymmetricKey('AES', secret_key_text) # Store the Key self.key_mgr.store_key(self.ctxt, _key, payload_content_type='text/plain', payload_content_encoding=None) self.create.assert_called_once_with('Nova Compute Key', secret_key_text, 'text/plain', None, 'AES', 256, 'CBC', None) self.assertEqual(self.store.call_count, 0) def test_store_null_context(self): self.key_mgr._barbican_client = None self.assertRaises(exception.Forbidden, self.key_mgr.store_key, None, None) @mock.patch('keystoneauth1.session.Session') @mock.patch('barbicanclient.client.Client') def test_get_barbican_client_new(self, mock_barbican, mock_keystone): manager = self._create_key_manager() manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) @mock.patch('keystoneauth1.session.Session') @mock.patch('barbicanclient.client.Client') def test_get_barbican_client_reused(self, mock_barbican, mock_keystone): manager = self._create_key_manager() manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) @mock.patch('keystoneauth1.session.Session') @mock.patch('barbicanclient.client.Client') def test_get_barbican_client_not_reused(self, mock_barbican, mock_keystone): manager = self._create_key_manager() manager._get_barbican_client(self.ctxt) self.assertEqual(mock_barbican.call_count, 1) ctxt2 = mock.MagicMock() ctxt2.auth_token = "fake_token2" ctxt2.project = "fake_project2" manager._get_barbican_client(ctxt2) self.assertEqual(mock_barbican.call_count, 2) def test_get_barbican_client_null_context(self): self.assertRaises(exception.Forbidden, self.key_mgr._get_barbican_client, None) def test_get_barbican_client_missing_project(self): del(self.ctxt.project_id) self.assertRaises(exception.KeyManagerError, self.key_mgr._get_barbican_client, self.ctxt) def test_get_barbican_client_none_project(self): self.ctxt.project_id = None self.assertRaises(exception.KeyManagerError, self.key_mgr._get_barbican_client, self.ctxt) nova-13.1.4/nova/tests/unit/keymgr/test_conf_key_mgr.py0000664000567000056710000000376213064447152024357 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the conf key manager. 
""" import array from oslo_config import cfg from nova.keymgr import conf_key_mgr from nova.keymgr import key from nova.tests.unit.keymgr import test_single_key_mgr CONF = cfg.CONF CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr') class ConfKeyManagerTestCase(test_single_key_mgr.SingleKeyManagerTestCase): def __init__(self, *args, **kwargs): super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs) self._hex_key = '0' * 64 def _create_key_manager(self): CONF.set_default('fixed_key', default=self._hex_key, group='keymgr') return conf_key_mgr.ConfKeyManager() def setUp(self): super(ConfKeyManagerTestCase, self).setUp() encoded_key = array.array('B', self._hex_key.decode('hex')).tolist() self.key = key.SymmetricKey('AES', encoded_key) def test_init(self): key_manager = self._create_key_manager() self.assertEqual(self._hex_key, key_manager._hex_key) def test_init_value_error(self): CONF.set_default('fixed_key', default=None, group='keymgr') self.assertRaises(ValueError, conf_key_mgr.ConfKeyManager) def test_generate_hex_key(self): key_manager = self._create_key_manager() self.assertEqual(self._hex_key, key_manager._generate_hex_key()) nova-13.1.4/nova/tests/unit/keymgr/test_key.py0000664000567000056710000000356413064447152022505 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the key classes. """ import array from nova.keymgr import key from nova import test class KeyTestCase(test.NoDBTestCase): def _create_key(self): raise NotImplementedError() def setUp(self): super(KeyTestCase, self).setUp() self.key = self._create_key() class SymmetricKeyTestCase(KeyTestCase): def _create_key(self): return key.SymmetricKey(self.algorithm, self.encoded) def setUp(self): self.algorithm = 'AES' self.encoded = array.array('B', ('0' * 64).decode('hex')).tolist() super(SymmetricKeyTestCase, self).setUp() def test_get_algorithm(self): self.assertEqual(self.key.get_algorithm(), self.algorithm) def test_get_format(self): self.assertEqual(self.key.get_format(), 'RAW') def test_get_encoded(self): self.assertEqual(self.key.get_encoded(), self.encoded) def test___eq__(self): self.assertTrue(self.key == self.key) self.assertFalse(self.key is None) self.assertFalse(None == self.key) def test___ne__(self): self.assertFalse(self.key != self.key) self.assertTrue(self.key is not None) self.assertTrue(None != self.key) nova-13.1.4/nova/tests/unit/keymgr/fake.py0000664000567000056710000000146013064447152021555 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake key manager.""" from nova.keymgr import mock_key_mgr def fake_api(): return mock_key_mgr.MockKeyManager() nova-13.1.4/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py0000664000567000056710000000324113064447152026605 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the not implemented key manager. """ from nova.keymgr import not_implemented_key_mgr from nova.tests.unit.keymgr import test_key_mgr class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): def _create_key_manager(self): return not_implemented_key_mgr.NotImplementedKeyManager() def test_create_key(self): self.assertRaises(NotImplementedError, self.key_mgr.create_key, None) def test_store_key(self): self.assertRaises(NotImplementedError, self.key_mgr.store_key, None, None) def test_copy_key(self): self.assertRaises(NotImplementedError, self.key_mgr.copy_key, None, None) def test_get_key(self): self.assertRaises(NotImplementedError, self.key_mgr.get_key, None, None) def test_delete_key(self): self.assertRaises(NotImplementedError, self.key_mgr.delete_key, None, None) nova-13.1.4/nova/tests/unit/virt/0000775000567000056710000000000013064447471017766 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/test_fake.py0000664000567000056710000000161313064447141022300 0ustar jenkinsjenkins00000000000000# # Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import test from nova.virt import driver from nova.virt import fake class FakeDriverTest(test.NoDBTestCase): def test_public_api_signatures(self): baseinst = driver.ComputeDriver(None) inst = fake.FakeDriver(fake.FakeVirtAPI(), True) self.assertPublicAPISignatures(baseinst, inst) nova-13.1.4/nova/tests/unit/virt/test_block_device.py0000664000567000056710000013125513064447152024013 0ustar jenkinsjenkins00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils import six from nova import block_device from nova import context from nova import exception from nova import objects from nova.objects import fields from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import matchers from nova.virt import block_device as driver_block_device from nova.virt import driver from nova.volume import cinder from nova.volume import encryptors class TestDriverBlockDevice(test.NoDBTestCase): driver_classes = { 'swap': driver_block_device.DriverSwapBlockDevice, 'ephemeral': driver_block_device.DriverEphemeralBlockDevice, 'volume': driver_block_device.DriverVolumeBlockDevice, 'snapshot': driver_block_device.DriverSnapshotBlockDevice, 'image': driver_block_device.DriverImageBlockDevice, 'blank': driver_block_device.DriverBlankBlockDevice } swap_bdm_dict = block_device.BlockDeviceDict( {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'disk_bus': 'scsi', 'volume_size': 2, 'boot_index': -1}) swap_driver_bdm = { 'device_name': '/dev/sdb1', 'swap_size': 2, 'disk_bus': 'scsi'} swap_legacy_driver_bdm = { 'device_name': '/dev/sdb1', 'swap_size': 2} ephemeral_bdm_dict = block_device.BlockDeviceDict( {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'disk_bus': 'scsi', 'device_type': 'disk', 'volume_size': 4, 'guest_format': 'ext4', 'delete_on_termination': True, 'boot_index': -1}) ephemeral_driver_bdm = { 'device_name': '/dev/sdc1', 'size': 4, 'device_type': 'disk', 'guest_format': 'ext4', 'disk_bus': 'scsi'} ephemeral_legacy_driver_bdm = { 'device_name': '/dev/sdc1', 'size': 4, 'virtual_name': 'ephemeral0', 'num': 0} volume_bdm_dict = block_device.BlockDeviceDict( {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'disk_bus': 'scsi', 'device_type': 'disk', 'volume_size': 8, 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'guest_format': 'ext4', 'connection_info': '{"fake": "connection_info"}', 'delete_on_termination': False, 'boot_index': 0}) volume_driver_bdm = { 'mount_device': '/dev/sda1', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': False, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': 'ext4', 'boot_index': 0} volume_legacy_driver_bdm = { 'mount_device': '/dev/sda1', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': False} snapshot_bdm_dict = block_device.BlockDeviceDict( {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 3, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 
'fake-volume-id-2', 'boot_index': -1}) snapshot_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} snapshot_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} image_bdm_dict = block_device.BlockDeviceDict( {'id': 5, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 1, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'image', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'image_id': 'fake-image-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) image_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} image_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} blank_bdm_dict = block_device.BlockDeviceDict( {'id': 6, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 3, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'blank', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) blank_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} blank_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} def setUp(self): super(TestDriverBlockDevice, self).setUp() self.volume_api = self.mox.CreateMock(cinder.API) self.virt_driver = self.mox.CreateMock(driver.ComputeDriver) self.context = context.RequestContext('fake_user', 'fake_project') # create bdm objects for testing self.swap_bdm = fake_block_device.fake_bdm_object( self.context, self.swap_bdm_dict) self.ephemeral_bdm = fake_block_device.fake_bdm_object( self.context, self.ephemeral_bdm_dict) self.volume_bdm = fake_block_device.fake_bdm_object( self.context, self.volume_bdm_dict) self.snapshot_bdm = fake_block_device.fake_bdm_object( self.context, self.snapshot_bdm_dict) self.image_bdm = fake_block_device.fake_bdm_object( self.context, self.image_bdm_dict) self.blank_bdm = fake_block_device.fake_bdm_object( self.context, self.blank_bdm_dict) def test_no_device_raises(self): for name, cls in self.driver_classes.items(): bdm = fake_block_device.fake_bdm_object( self.context, {'no_device': True}) self.assertRaises(driver_block_device._NotTransformable, cls, bdm) def _test_driver_device(self, name): db_bdm = getattr(self, "%s_bdm" % name) test_bdm = self.driver_classes[name](db_bdm) self.assertThat(test_bdm, matchers.DictMatches( getattr(self, "%s_driver_bdm" % name))) for k, v in six.iteritems(db_bdm): field_val = getattr(test_bdm._bdm_obj, k) if isinstance(field_val, bool): v = bool(v) self.assertEqual(field_val, v) self.assertThat(test_bdm.legacy(), matchers.DictMatches( getattr(self, "%s_legacy_driver_bdm" % name))) # Test passthru attributes for passthru in test_bdm._proxy_as_attr: self.assertEqual(getattr(test_bdm, passthru), getattr(test_bdm._bdm_obj, passthru)) # Make sure that 
all others raise _invalidType for other_name, cls in six.iteritems(self.driver_classes): if other_name == name: continue self.assertRaises(driver_block_device._InvalidType, cls, getattr(self, '%s_bdm' % name)) # Test the save method with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock: for fld, alias in six.iteritems(test_bdm._update_on_save): # We can't set fake values on enums, like device_type, # so skip those. if not isinstance(test_bdm._bdm_obj.fields[fld], fields.BaseEnumField): test_bdm[alias or fld] = 'fake_changed_value' test_bdm.save() for fld, alias in six.iteritems(test_bdm._update_on_save): self.assertEqual(test_bdm[alias or fld], getattr(test_bdm._bdm_obj, fld)) save_mock.assert_called_once_with() def check_save(): self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed()) # Test that nothing is set on the object if there are no actual changes test_bdm._bdm_obj.obj_reset_changes() with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock: save_mock.side_effect = check_save test_bdm.save() def _test_driver_default_size(self, name): size = 'swap_size' if name == 'swap' else 'size' no_size_bdm = getattr(self, "%s_bdm_dict" % name).copy() no_size_bdm['volume_size'] = None driver_bdm = self.driver_classes[name]( fake_block_device.fake_bdm_object(self.context, no_size_bdm)) self.assertEqual(driver_bdm[size], 0) del no_size_bdm['volume_size'] driver_bdm = self.driver_classes[name]( fake_block_device.fake_bdm_object(self.context, no_size_bdm)) self.assertEqual(driver_bdm[size], 0) def test_driver_swap_block_device(self): self._test_driver_device("swap") def test_driver_swap_default_size(self): self._test_driver_default_size('swap') def test_driver_ephemeral_block_device(self): self._test_driver_device("ephemeral") def test_driver_ephemeral_default_size(self): self._test_driver_default_size('ephemeral') def test_driver_volume_block_device(self): self._test_driver_device("volume") test_bdm = self.driver_classes['volume']( self.volume_bdm) self.assertEqual(test_bdm['connection_info'], jsonutils.loads(test_bdm._bdm_obj.connection_info)) self.assertEqual(test_bdm._bdm_obj.id, 3) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1') self.assertEqual(test_bdm.volume_size, 8) def test_driver_snapshot_block_device(self): self._test_driver_device("snapshot") test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) self.assertEqual(test_bdm._bdm_obj.id, 4) self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1') self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') self.assertEqual(test_bdm.volume_size, 3) def test_driver_image_block_device(self): self._test_driver_device('image') test_bdm = self.driver_classes['image']( self.image_bdm) self.assertEqual(test_bdm._bdm_obj.id, 5) self.assertEqual(test_bdm.image_id, 'fake-image-id-1') self.assertEqual(test_bdm.volume_size, 1) def test_driver_image_block_device_destination_local(self): self._test_driver_device('image') bdm = self.image_bdm_dict.copy() bdm['destination_type'] = 'local' self.assertRaises(driver_block_device._InvalidType, self.driver_classes['image'], fake_block_device.fake_bdm_object(self.context, bdm)) def test_driver_blank_block_device(self): self._test_driver_device('blank') test_bdm = self.driver_classes['blank']( self.blank_bdm) self.assertEqual(6, test_bdm._bdm_obj.id) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) self.assertEqual(3, test_bdm.volume_size) def _test_call_wait_func(self, delete_on_termination, delete_fail=False): test_bdm = 
self.driver_classes['volume'](self.volume_bdm) test_bdm['delete_on_termination'] = delete_on_termination with mock.patch.object(self.volume_api, 'delete') as vol_delete: wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id='fake-id', seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception if delete_on_termination and delete_fail: vol_delete.side_effect = Exception() self.assertRaises(exception.VolumeNotCreated, test_bdm._call_wait_func, context=self.context, wait_func=wait_func, volume_api=self.volume_api, volume_id='fake-id') self.assertEqual(delete_on_termination, vol_delete.called) def test_call_wait_delete_volume(self): self._test_call_wait_func(True) def test_call_wait_delete_volume_fail(self): self._test_call_wait_func(True, True) def test_call_wait_no_delete_volume(self): self._test_call_wait_func(False) def _test_volume_attach(self, driver_bdm, bdm_dict, fake_volume, check_attach=True, fail_check_attach=False, driver_attach=False, fail_driver_attach=False, volume_attach=True, fail_volume_attach=False, access_mode='rw', availability_zone=None): elevated_context = self.context.elevated() self.stubs.Set(self.context, 'elevated', lambda: elevated_context) self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save') self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata') instance_detail = {'id': '123', 'uuid': 'fake_uuid', 'availability_zone': availability_zone} instance = fake_instance.fake_instance_obj(self.context, **instance_detail) connector = {'ip': 'fake_ip', 'host': 'fake_host'} connection_info = {'data': {'access_mode': access_mode}} expected_conn_info = {'data': {'access_mode': access_mode}, 'serial': fake_volume['id']} enc_data = {'fake': 'enc_data'} self.volume_api.get(self.context, fake_volume['id']).AndReturn(fake_volume) if check_attach: if not fail_check_attach: self.volume_api.check_attach(self.context, fake_volume, instance=instance).AndReturn(None) else: self.volume_api.check_attach(self.context, fake_volume, instance=instance).AndRaise( test.TestingException) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info self.virt_driver.get_volume_connector(instance).AndReturn(connector) self.volume_api.initialize_connection( elevated_context, fake_volume['id'], connector).AndReturn(connection_info) if driver_attach: encryptors.get_encryption_metadata( elevated_context, self.volume_api, fake_volume['id'], connection_info).AndReturn(enc_data) if not fail_driver_attach: self.virt_driver.attach_volume( elevated_context, expected_conn_info, instance, bdm_dict['device_name'], disk_bus=bdm_dict['disk_bus'], device_type=bdm_dict['device_type'], encryption=enc_data).AndReturn(None) else: self.virt_driver.attach_volume( elevated_context, expected_conn_info, instance, bdm_dict['device_name'], disk_bus=bdm_dict['disk_bus'], device_type=bdm_dict['device_type'], encryption=enc_data).AndRaise(test.TestingException) self.volume_api.terminate_connection( elevated_context, fake_volume['id'], connector).AndReturn(None) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info if volume_attach: driver_bdm._bdm_obj.save().AndReturn(None) if not fail_volume_attach: self.volume_api.attach(elevated_context, fake_volume['id'], 'fake_uuid', bdm_dict['device_name'], mode=access_mode).AndReturn(None) else: self.volume_api.attach(elevated_context, fake_volume['id'], 'fake_uuid', bdm_dict['device_name'], mode=access_mode).AndRaise( test.TestingException) if driver_attach: 
self.virt_driver.detach_volume( expected_conn_info, instance, bdm_dict['device_name'], encryption=enc_data).AndReturn(None) self.volume_api.terminate_connection( elevated_context, fake_volume['id'], connector).AndReturn(None) self.volume_api.detach(elevated_context, fake_volume['id']).AndReturn(None) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info def test_volume_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_ro(self): test_bdm = self.driver_classes['volume'](self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, access_mode='ro') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_update_size(self): test_bdm = self.driver_classes['volume'](self.volume_bdm) test_bdm.volume_size = None volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached', 'size': 42} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(expected_conn_info, test_bdm['connection_info']) self.assertEqual(42, test_bdm.volume_size) def test_volume_attach_check_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, fail_check_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver) def test_volume_no_volume_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, check_attach=False, driver_attach=False) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=False, do_driver_attach=False) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_no_check_driver_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, check_attach=False, driver_attach=True) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=False, do_driver_attach=True) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_driver_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, driver_attach=True, fail_driver_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, 
self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=True) def test_volume_attach_volume_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, driver_attach=True, fail_volume_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=True) def test_volume_attach_no_driver_attach_volume_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, fail_volume_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=False) def test_refresh_connection(self): test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} connector = {'ip': 'fake_ip', 'host': 'fake_host'} connection_info = {'data': {'multipath_id': 'fake_multipath_id'}} expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'}, 'serial': 'fake-volume-id-2'} self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save') self.virt_driver.get_volume_connector(instance).AndReturn(connector) self.volume_api.initialize_connection( self.context, test_bdm.volume_id, connector).AndReturn(connection_info) test_bdm._bdm_obj.save().AndReturn(None) self.mox.ReplayAll() test_bdm.refresh_connection_info(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_snapshot_attach_no_volume(self): no_volume_snapshot = self.snapshot_bdm_dict.copy() no_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, no_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.get_snapshot(self.context, 'fake-snapshot-id-1').AndReturn(snapshot) self.volume_api.create(self.context, 3, '', '', snapshot, availability_zone=None).AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_snapshot, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_snapshot_attach_no_volume_cinder_cross_az_attach_false(self): # Tests that the volume created from the snapshot has the same AZ as # the instance. 
self.flags(cross_az_attach=False, group='cinder') no_volume_snapshot = self.snapshot_bdm_dict.copy() no_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, no_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.get_snapshot(self.context, 'fake-snapshot-id-1').AndReturn(snapshot) self.volume_api.create(self.context, 3, '', '', snapshot, availability_zone='test-az').AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_snapshot, volume, availability_zone='test-az') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_snapshot_attach_fail_volume(self): fail_volume_snapshot = self.snapshot_bdm_dict.copy() fail_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, fail_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) with test.nested( mock.patch.object(self.volume_api, 'get_snapshot', return_value=snapshot), mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_get_snap, vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_get_snap.assert_called_once_with( self.context, 'fake-snapshot-id-1') vol_create.assert_called_once_with( self.context, 3, '', '', snapshot, availability_zone=None) vol_delete.assert_called_once_with(self.context, volume['id']) def test_snapshot_attach_volume(self): test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} volume_class = self.driver_classes['volume'] self.mox.StubOutWithMock(volume_class, 'attach') # Make sure theses are not called self.mox.StubOutWithMock(self.volume_api, 'get_snapshot') self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_image_attach_no_volume(self): no_volume_image = self.image_bdm_dict.copy() no_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, no_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.create(self.context, 1, '', '', image_id=image['id'], availability_zone=None).AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( 
test_bdm, no_volume_image, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_image_attach_no_volume_cinder_cross_az_attach_false(self): # Tests that the volume created from the image has the same AZ as the # instance. self.flags(cross_az_attach=False, group='cinder') no_volume_image = self.image_bdm_dict.copy() no_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, no_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.create(self.context, 1, '', '', image_id=image['id'], availability_zone='test-az').AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_image, volume, availability_zone='test-az') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_image_attach_fail_volume(self): fail_volume_image = self.image_bdm_dict.copy() fail_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, fail_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_create.assert_called_once_with( self.context, 1, '', '', image_id=image['id'], availability_zone=None) vol_delete.assert_called_once_with(self.context, volume['id']) def test_image_attach_volume(self): test_bdm = self.driver_classes['image']( self.image_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} volume_class = self.driver_classes['volume'] self.mox.StubOutWithMock(volume_class, 'attach') # Make sure theses are not called self.mox.StubOutWithMock(self.volume_api, 'get_snapshot') self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_blank_attach_fail_volume(self): no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_create, vol_delete): wait_func = mock.MagicMock() 
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone=None) vol_delete.assert_called_once_with( self.context, volume['id']) def test_blank_attach_volume(self): no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) volume_class = self.driver_classes['volume'] volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(volume_class, 'attach') ) as (vol_create, vol_attach): test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone=None) vol_attach.assert_called_once_with(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_blank_attach_volume_cinder_cross_az_attach_false(self): # Tests that the blank volume created is in the same availability zone # as the instance. self.flags(cross_az_attach=False, group='cinder') no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) updates = {'uuid': 'fake-uuid', 'availability_zone': 'test-az'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **updates) volume_class = self.driver_classes['volume'] volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with mock.patch.object(self.volume_api, 'create', return_value=volume) as vol_create: with mock.patch.object(volume_class, 'attach') as vol_attach: test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone='test-az') vol_attach.assert_called_once_with(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_convert_block_devices(self): bdms = objects.BlockDeviceMappingList( objects=[self.volume_bdm, self.ephemeral_bdm]) converted = driver_block_device._convert_block_devices( self.driver_classes['volume'], bdms) self.assertEqual(converted, [self.volume_driver_bdm]) def test_convert_all_volumes(self): converted = driver_block_device.convert_all_volumes() self.assertEqual([], converted) converted = driver_block_device.convert_all_volumes( self.volume_bdm, self.ephemeral_bdm, self.image_bdm, self.blank_bdm, self.snapshot_bdm) self.assertEqual(converted, [self.volume_driver_bdm, self.image_driver_bdm, self.blank_driver_bdm, self.snapshot_driver_bdm]) def test_convert_volume(self): self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm)) self.assertEqual(self.volume_driver_bdm, driver_block_device.convert_volume(self.volume_bdm)) 
self.assertEqual(self.snapshot_driver_bdm, driver_block_device.convert_volume(self.snapshot_bdm)) def test_legacy_block_devices(self): test_snapshot = self.driver_classes['snapshot']( self.snapshot_bdm) block_device_mapping = [test_snapshot, test_snapshot] legacy_bdm = driver_block_device.legacy_block_devices( block_device_mapping) self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm, self.snapshot_legacy_driver_bdm]) # Test that the ephemerals work as expected test_ephemerals = [self.driver_classes['ephemeral']( self.ephemeral_bdm) for _ in range(2)] expected = [self.ephemeral_legacy_driver_bdm.copy() for _ in range(2)] expected[0]['virtual_name'] = 'ephemeral0' expected[0]['num'] = 0 expected[1]['virtual_name'] = 'ephemeral1' expected[1]['num'] = 1 legacy_ephemerals = driver_block_device.legacy_block_devices( test_ephemerals) self.assertEqual(expected, legacy_ephemerals) def test_get_swap(self): swap = [self.swap_driver_bdm] legacy_swap = [self.swap_legacy_driver_bdm] no_swap = [self.volume_driver_bdm] self.assertEqual(swap[0], driver_block_device.get_swap(swap)) self.assertEqual(legacy_swap[0], driver_block_device.get_swap(legacy_swap)) self.assertIsNone(driver_block_device.get_swap(no_swap)) self.assertIsNone(driver_block_device.get_swap([])) def test_is_implemented(self): for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm, self.ephemeral_bdm, self.snapshot_bdm): self.assertTrue(driver_block_device.is_implemented(bdm)) local_image = self.image_bdm_dict.copy() local_image['destination_type'] = 'local' self.assertFalse(driver_block_device.is_implemented( fake_block_device.fake_bdm_object(self.context, local_image))) def test_is_block_device_mapping(self): test_swap = self.driver_classes['swap'](self.swap_bdm) test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm) test_image = self.driver_classes['image'](self.image_bdm) test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm) test_volume = self.driver_classes['volume'](self.volume_bdm) test_blank = self.driver_classes['blank'](self.blank_bdm) for bdm in (test_image, test_snapshot, test_volume, test_blank): self.assertTrue(driver_block_device.is_block_device_mapping( bdm._bdm_obj)) for bdm in (test_swap, test_ephemeral): self.assertFalse(driver_block_device.is_block_device_mapping( bdm._bdm_obj)) def test_get_volume_create_az_cinder_cross_az_attach_true(self): # Tests that we get None back if cinder.cross_az_attach=True even if # the instance has an AZ assigned. Note that since cross_az_attach # defaults to True we don't need to set a flag explicitly for the test. updates = {'availability_zone': 'test-az'} instance = fake_instance.fake_instance_obj(self.context, **updates) self.assertIsNone( driver_block_device._get_volume_create_az_value(instance)) nova-13.1.4/nova/tests/unit/virt/image/0000775000567000056710000000000013064447471021050 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/image/test_model.py0000664000567000056710000000624713064447152023566 0ustar jenkinsjenkins00000000000000# # Copyright (C) 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from nova import exception from nova import test from nova.virt.image import model as imgmodel class ImageTest(test.NoDBTestCase): def test_local_file_image(self): img = imgmodel.LocalFileImage( "/var/lib/libvirt/images/demo.qcow2", imgmodel.FORMAT_QCOW2) self.assertIsInstance(img, imgmodel.Image) self.assertEqual("/var/lib/libvirt/images/demo.qcow2", img.path) self.assertEqual(imgmodel.FORMAT_QCOW2, img.format) def test_local_file_bad_format(self): self.assertRaises(exception.InvalidImageFormat, imgmodel.LocalFileImage, "/var/lib/libvirt/images/demo.qcow2", "jpeg") def test_local_block_image(self): img = imgmodel.LocalBlockImage( "/dev/volgroup/demovol") self.assertIsInstance(img, imgmodel.Image) self.assertEqual("/dev/volgroup/demovol", img.path) self.assertEqual(imgmodel.FORMAT_RAW, img.format) def test_rbd_image(self): img = imgmodel.RBDImage( "demo", "openstack", "cthulu", "braanes", ["rbd.example.org"]) self.assertIsInstance(img, imgmodel.Image) self.assertEqual("demo", img.name) self.assertEqual("openstack", img.pool) self.assertEqual("cthulu", img.user) self.assertEqual("braanes", img.password) self.assertEqual(["rbd.example.org"], img.servers) self.assertEqual(imgmodel.FORMAT_RAW, img.format) def test_equality(self): img1 = imgmodel.LocalFileImage( "/var/lib/libvirt/images/demo.qcow2", imgmodel.FORMAT_QCOW2) img2 = imgmodel.LocalFileImage( "/var/lib/libvirt/images/demo.qcow2", imgmodel.FORMAT_QCOW2) img3 = imgmodel.LocalFileImage( "/var/lib/libvirt/images/demo.qcow2", imgmodel.FORMAT_RAW) img4 = imgmodel.LocalImage( "/dev/mapper/vol", imgmodel.FORMAT_RAW) img5 = imgmodel.LocalBlockImage( "/dev/mapper/vol") self.assertEqual(img1, img1) self.assertEqual(img1, img2) self.assertEqual(img1.__hash__(), img2.__hash__()) self.assertNotEqual(img1, img3) self.assertNotEqual(img4, img5) def test_stringify(self): img = imgmodel.RBDImage( "demo", "openstack", "cthulu", "braanes", ["rbd.example.org"]) msg = str(img) self.assertTrue(msg.find("braanes") == -1) self.assertTrue(msg.find("***") != -1) nova-13.1.4/nova/tests/unit/virt/image/__init__.py0000664000567000056710000000000013064447140023140 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/test_configdrive.py0000664000567000056710000000373613064447140023700 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
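# The cases below exercise nova.virt.configdrive.required_by(), which
# reports whether an instance needs a config drive: the instance asked for
# one itself (config_drive="yes"), the image marked it mandatory via the
# img_config_drive property, or the force_config_drive flag is set. A
# minimal illustrative call, mirroring the fixtures constructed in the
# tests in this module:
#
#     instance = objects.Instance(
#         config_drive=None,
#         system_metadata={"image_img_config_drive": "optional"})
#     configdrive.required_by(instance)  # False unless force_config_drive=True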
from nova import objects from nova import test from nova.virt import configdrive class ConfigDriveTestCase(test.NoDBTestCase): def test_instance_force(self): self.flags(force_config_drive=False) instance = objects.Instance( config_drive="yes", system_metadata={ "image_img_config_drive": "mandatory", } ) self.assertTrue(configdrive.required_by(instance)) def test_image_meta_force(self): self.flags(force_config_drive=False) instance = objects.Instance( config_drive=None, system_metadata={ "image_img_config_drive": "mandatory", } ) self.assertTrue(configdrive.required_by(instance)) def test_config_flag_force(self): self.flags(force_config_drive=True) instance = objects.Instance( config_drive=None, system_metadata={ "image_img_config_drive": "optional", } ) self.assertTrue(configdrive.required_by(instance)) def test_no_config_drive(self): self.flags(force_config_drive=False) instance = objects.Instance( config_drive=None, system_metadata={ "image_img_config_drive": "optional", } ) self.assertFalse(configdrive.required_by(instance)) nova-13.1.4/nova/tests/unit/virt/ironic/0000775000567000056710000000000013064447471021251 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/ironic/test_driver.py0000664000567000056710000023642513064447152024165 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
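# These driver tests never contact a real ironic service: the module-level
# FAKE_CLIENT_WRAPPER is substituted for IronicClientWrapper, and
# api_max_retries/api_retry_interval are forced to 1/0 in setUp so failures
# surface immediately instead of sleeping between retries. The isolation
# pattern, as used by the class decorators below, looks like:
#
#     @mock.patch.object(cw, 'IronicClientWrapper',
#                        lambda *_: FAKE_CLIENT_WRAPPER)
#     class IronicDriverTestCase(test.NoDBTestCase):
#         ...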
"""Tests for the ironic driver.""" from ironicclient import exc as ironic_exception import mock from oslo_config import cfg from oslo_service import loopingcall from oslo_utils import uuidutils import six from testtools.matchers import HasLength from nova.api.metadata import base as instance_metadata from nova.compute import power_state as nova_states from nova.compute import task_states from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance from nova.tests.unit import utils from nova.tests.unit.virt.ironic import utils as ironic_utils from nova.virt import configdrive from nova.virt import driver from nova.virt import fake from nova.virt import firewall from nova.virt import hardware from nova.virt.ironic import client_wrapper as cw from nova.virt.ironic import driver as ironic_driver from nova.virt.ironic import ironic_states CONF = cfg.CONF IRONIC_FLAGS = dict( api_version=1, group='ironic', ) FAKE_CLIENT = ironic_utils.FakeClient() class FakeClientWrapper(cw.IronicClientWrapper): def _get_client(self, retry_on_conflict=True): return FAKE_CLIENT class FakeLoopingCall(object): def __init__(self): self.wait = mock.MagicMock() self.start = mock.MagicMock() self.start.return_value = self def _get_properties(): return {'cpus': 2, 'memory_mb': 512, 'local_gb': 10, 'cpu_arch': 'x86_64', 'capabilities': None} def _get_instance_info(): return {'vcpus': 1, 'memory_mb': 1024, 'local_gb': 10} def _get_stats(): return {'cpu_arch': 'x86_64'} FAKE_CLIENT_WRAPPER = FakeClientWrapper() @mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER) class IronicDriverTestCase(test.NoDBTestCase): @mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER) def setUp(self): super(IronicDriverTestCase, self).setUp() self.flags(**IRONIC_FLAGS) # set client log config to exercise the code that manipulates it CONF.set_override('client_log_level', 'DEBUG', group='ironic') self.driver = ironic_driver.IronicDriver(None) self.driver.virtapi = fake.FakeVirtAPI() self.ctx = nova_context.get_admin_context() self.instance_uuid = uuidutils.generate_uuid() # mock retries configs to avoid sleeps and make tests run quicker CONF.set_default('api_max_retries', default=1, group='ironic') CONF.set_default('api_retry_interval', default=0, group='ironic') def test_public_api_signatures(self): self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver) def test_validate_driver_loading(self): self.assertIsInstance(self.driver, ironic_driver.IronicDriver) def test_driver_capabilities(self): self.assertFalse(self.driver.capabilities['has_imagecache'], 'Driver capabilities for \'has_imagecache\'' 'is invalid') self.assertFalse(self.driver.capabilities['supports_recreate'], 'Driver capabilities for \'supports_recreate\'' 'is invalid') def test__get_hypervisor_type(self): self.assertEqual('ironic', self.driver._get_hypervisor_type()) def test__get_hypervisor_version(self): self.assertEqual(1, self.driver._get_hypervisor_version()) @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid') def test__validate_instance_and_node(self, mock_gbiui): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid, instance_uuid=self.instance_uuid) instance = fake_instance.fake_instance_obj(self.ctx, uuid=self.instance_uuid) mock_gbiui.return_value = node result = self.driver._validate_instance_and_node(instance) 
self.assertEqual(result.uuid, node_uuid) mock_gbiui.assert_called_once_with(instance.uuid, fields=ironic_driver._NODE_FIELDS) @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid') def test__validate_instance_and_node_failed(self, mock_gbiui): mock_gbiui.side_effect = ironic_exception.NotFound() instance = fake_instance.fake_instance_obj(self.ctx, uuid=self.instance_uuid) self.assertRaises(exception.InstanceNotFound, self.driver._validate_instance_and_node, instance) mock_gbiui.assert_called_once_with(instance.uuid, fields=ironic_driver._NODE_FIELDS) @mock.patch.object(objects.Instance, 'refresh') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def test__wait_for_active_pass(self, fake_validate, fake_refresh): instance = fake_instance.fake_instance_obj(self.ctx, uuid=uuidutils.generate_uuid()) node = ironic_utils.get_test_node( provision_state=ironic_states.DEPLOYING) fake_validate.return_value = node self.driver._wait_for_active(instance) fake_validate.assert_called_once_with(instance) fake_refresh.assert_called_once_with() @mock.patch.object(objects.Instance, 'refresh') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def test__wait_for_active_done(self, fake_validate, fake_refresh): instance = fake_instance.fake_instance_obj(self.ctx, uuid=uuidutils.generate_uuid()) node = ironic_utils.get_test_node( provision_state=ironic_states.ACTIVE) fake_validate.return_value = node self.assertRaises(loopingcall.LoopingCallDone, self.driver._wait_for_active, instance) fake_validate.assert_called_once_with(instance) fake_refresh.assert_called_once_with() @mock.patch.object(objects.Instance, 'refresh') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def test__wait_for_active_fail(self, fake_validate, fake_refresh): instance = fake_instance.fake_instance_obj(self.ctx, uuid=uuidutils.generate_uuid()) node = ironic_utils.get_test_node( provision_state=ironic_states.DEPLOYFAIL) fake_validate.return_value = node self.assertRaises(exception.InstanceDeployFailure, self.driver._wait_for_active, instance) fake_validate.assert_called_once_with(instance) fake_refresh.assert_called_once_with() @mock.patch.object(objects.Instance, 'refresh') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def _wait_for_active_abort(self, instance_params, fake_validate, fake_refresh): instance = fake_instance.fake_instance_obj(self.ctx, uuid=uuidutils.generate_uuid(), **instance_params) self.assertRaises(exception.InstanceDeployFailure, self.driver._wait_for_active, instance) # Assert _validate_instance_and_node wasn't called self.assertFalse(fake_validate.called) fake_refresh.assert_called_once_with() def test__wait_for_active_abort_deleting(self): self._wait_for_active_abort({'task_state': task_states.DELETING}) def test__wait_for_active_abort_deleted(self): self._wait_for_active_abort({'vm_state': vm_states.DELETED}) def test__wait_for_active_abort_error(self): self._wait_for_active_abort({'vm_state': vm_states.ERROR}) @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def test__wait_for_power_state_pass(self, fake_validate): instance = fake_instance.fake_instance_obj(self.ctx, uuid=uuidutils.generate_uuid()) node = ironic_utils.get_test_node( target_power_state=ironic_states.POWER_OFF) fake_validate.return_value = node self.driver._wait_for_power_state(instance, 'fake message') self.assertTrue(fake_validate.called) @mock.patch.object(ironic_driver.IronicDriver, 
'_validate_instance_and_node') def test__wait_for_power_state_ok(self, fake_validate): instance = fake_instance.fake_instance_obj(self.ctx, uuid=uuidutils.generate_uuid()) node = ironic_utils.get_test_node( target_power_state=ironic_states.NOSTATE) fake_validate.return_value = node self.assertRaises(loopingcall.LoopingCallDone, self.driver._wait_for_power_state, instance, 'fake message') self.assertTrue(fake_validate.called) def _test__node_resource(self, has_inst_info): node_uuid = uuidutils.generate_uuid() props = _get_properties() stats = _get_stats() if has_inst_info: instance_info = _get_instance_info() else: instance_info = {} node = ironic_utils.get_test_node(uuid=node_uuid, instance_uuid=self.instance_uuid, instance_info=instance_info, properties=props) result = self.driver._node_resource(node) wantkeys = ["hypervisor_hostname", "hypervisor_type", "hypervisor_version", "cpu_info", "vcpus", "vcpus_used", "memory_mb", "memory_mb_used", "local_gb", "local_gb_used", "disk_available_least", "supported_instances", "stats", "numa_topology"] wantkeys.sort() gotkeys = result.keys() gotkeys.sort() self.assertEqual(wantkeys, gotkeys) if has_inst_info: props_dict = instance_info expected_cpus = instance_info['vcpus'] else: props_dict = props expected_cpus = props['cpus'] self.assertEqual(0, result['vcpus']) self.assertEqual(expected_cpus, result['vcpus_used']) self.assertEqual(0, result['memory_mb']) self.assertEqual(props_dict['memory_mb'], result['memory_mb_used']) self.assertEqual(0, result['local_gb']) self.assertEqual(props_dict['local_gb'], result['local_gb_used']) self.assertEqual(node_uuid, result['hypervisor_hostname']) self.assertEqual(stats, result['stats']) self.assertIsNone(result['numa_topology']) def test__node_resource(self): self._test__node_resource(True) def test__node_resource_no_instance_info(self): self._test__node_resource(False) def test__node_resource_canonicalizes_arch(self): node_uuid = uuidutils.generate_uuid() props = _get_properties() props['cpu_arch'] = 'i386' node = ironic_utils.get_test_node(uuid=node_uuid, properties=props) result = self.driver._node_resource(node) self.assertEqual('i686', result['supported_instances'][0][0]) self.assertEqual('i386', result['stats']['cpu_arch']) def test__node_resource_unknown_arch(self): node_uuid = uuidutils.generate_uuid() props = _get_properties() del props['cpu_arch'] node = ironic_utils.get_test_node(uuid=node_uuid, properties=props) result = self.driver._node_resource(node) self.assertEqual([], result['supported_instances']) def test__node_resource_exposes_capabilities(self): props = _get_properties() props['capabilities'] = 'test:capability, test2:value2' node = ironic_utils.get_test_node(properties=props) result = self.driver._node_resource(node) stats = result['stats'] self.assertIsNone(stats.get('capabilities')) self.assertEqual('capability', stats.get('test')) self.assertEqual('value2', stats.get('test2')) def test__node_resource_no_capabilities(self): props = _get_properties() props['capabilities'] = None node = ironic_utils.get_test_node(properties=props) result = self.driver._node_resource(node) self.assertIsNone(result['stats'].get('capabilities')) def test__node_resource_malformed_capabilities(self): props = _get_properties() props['capabilities'] = 'test:capability,:no_key,no_val:' node = ironic_utils.get_test_node(properties=props) result = self.driver._node_resource(node) stats = result['stats'] self.assertEqual('capability', stats.get('test')) def test__node_resource_available(self): node_uuid = 
uuidutils.generate_uuid() props = _get_properties() stats = _get_stats() node = ironic_utils.get_test_node( uuid=node_uuid, instance_uuid=None, power_state=ironic_states.POWER_OFF, properties=props, provision_state=ironic_states.AVAILABLE) result = self.driver._node_resource(node) self.assertEqual(props['cpus'], result['vcpus']) self.assertEqual(0, result['vcpus_used']) self.assertEqual(props['memory_mb'], result['memory_mb']) self.assertEqual(0, result['memory_mb_used']) self.assertEqual(props['local_gb'], result['local_gb']) self.assertEqual(0, result['local_gb_used']) self.assertEqual(node_uuid, result['hypervisor_hostname']) self.assertEqual(stats, result['stats']) @mock.patch.object(ironic_driver.IronicDriver, '_node_resources_unavailable') def test__node_resource_unavailable_node_res(self, mock_res_unavail): mock_res_unavail.return_value = True node_uuid = uuidutils.generate_uuid() props = _get_properties() stats = _get_stats() node = ironic_utils.get_test_node(uuid=node_uuid, instance_uuid=None, properties=props) result = self.driver._node_resource(node) self.assertEqual(0, result['vcpus']) self.assertEqual(0, result['vcpus_used']) self.assertEqual(0, result['memory_mb']) self.assertEqual(0, result['memory_mb_used']) self.assertEqual(0, result['local_gb']) self.assertEqual(0, result['local_gb_used']) self.assertEqual(node_uuid, result['hypervisor_hostname']) self.assertEqual(stats, result['stats']) @mock.patch.object(ironic_driver.IronicDriver, '_node_resources_used') def test__node_resource_used_node_res(self, mock_res_used): mock_res_used.return_value = True node_uuid = uuidutils.generate_uuid() props = _get_properties() stats = _get_stats() instance_info = _get_instance_info() node = ironic_utils.get_test_node( uuid=node_uuid, instance_uuid=uuidutils.generate_uuid(), provision_state=ironic_states.ACTIVE, properties=props, instance_info=instance_info) result = self.driver._node_resource(node) self.assertEqual(0, result['vcpus']) self.assertEqual(instance_info['vcpus'], result['vcpus_used']) self.assertEqual(0, result['memory_mb']) self.assertEqual(instance_info['memory_mb'], result['memory_mb_used']) self.assertEqual(0, result['local_gb']) self.assertEqual(instance_info['local_gb'], result['local_gb_used']) self.assertEqual(node_uuid, result['hypervisor_hostname']) self.assertEqual(stats, result['stats']) @mock.patch.object(ironic_driver.LOG, 'warning') def test__parse_node_properties(self, mock_warning): props = _get_properties() node = ironic_utils.get_test_node( uuid=uuidutils.generate_uuid(), properties=props) # raw_cpu_arch is included because extra_specs filters do not # canonicalized the arch props['raw_cpu_arch'] = props['cpu_arch'] parsed = self.driver._parse_node_properties(node) self.assertEqual(props, parsed) # Assert we didn't log any warning since all properties are # correct self.assertFalse(mock_warning.called) @mock.patch.object(ironic_driver.LOG, 'warning') def test__parse_node_properties_bad_values(self, mock_warning): props = _get_properties() props['cpus'] = 'bad-value' props['memory_mb'] = 'bad-value' props['local_gb'] = 'bad-value' props['cpu_arch'] = 'bad-value' node = ironic_utils.get_test_node( uuid=uuidutils.generate_uuid(), properties=props) # raw_cpu_arch is included because extra_specs filters do not # canonicalized the arch props['raw_cpu_arch'] = props['cpu_arch'] parsed = self.driver._parse_node_properties(node) expected_props = props.copy() expected_props['cpus'] = 0 expected_props['memory_mb'] = 0 expected_props['local_gb'] = 0 
expected_props['cpu_arch'] = None self.assertEqual(expected_props, parsed) self.assertEqual(4, mock_warning.call_count) @mock.patch.object(ironic_driver.LOG, 'warning') def test__parse_node_instance_info(self, mock_warning): props = _get_properties() instance_info = _get_instance_info() node = ironic_utils.get_test_node( uuid=uuidutils.generate_uuid(), instance_info=instance_info) parsed = self.driver._parse_node_instance_info(node, props) self.assertEqual(instance_info, parsed) self.assertFalse(mock_warning.called) @mock.patch.object(ironic_driver.LOG, 'warning') def test__parse_node_instance_info_bad_values(self, mock_warning): props = _get_properties() instance_info = _get_instance_info() instance_info['vcpus'] = 'bad-value' instance_info['memory_mb'] = 'bad-value' instance_info['local_gb'] = 'bad-value' node = ironic_utils.get_test_node( uuid=uuidutils.generate_uuid(), instance_info=instance_info) parsed = self.driver._parse_node_instance_info(node, props) expected = { 'vcpus': props['cpus'], 'memory_mb': props['memory_mb'], 'local_gb': props['local_gb'] } self.assertEqual(expected, parsed) self.assertEqual(3, mock_warning.call_count) @mock.patch.object(ironic_driver.LOG, 'warning') def test__parse_node_properties_canonicalize_cpu_arch(self, mock_warning): props = _get_properties() props['cpu_arch'] = 'amd64' node = ironic_utils.get_test_node( uuid=uuidutils.generate_uuid(), properties=props) # raw_cpu_arch is included because extra_specs filters do not # canonicalized the arch props['raw_cpu_arch'] = props['cpu_arch'] parsed = self.driver._parse_node_properties(node) expected_props = props.copy() # Make sure it cpu_arch was canonicalized expected_props['cpu_arch'] = 'x86_64' self.assertEqual(expected_props, parsed) # Assert we didn't log any warning since all properties are # correct self.assertFalse(mock_warning.called) @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter', create=True) @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering', create=True) @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter', create=True) def test__start_firewall(self, mock_aif, mock_sbf, mock_pif): fake_inst = 'fake-inst' fake_net_info = utils.get_test_network_info() self.driver._start_firewall(fake_inst, fake_net_info) mock_aif.assert_called_once_with(fake_inst, fake_net_info) mock_sbf.assert_called_once_with(fake_inst, fake_net_info) mock_pif.assert_called_once_with(fake_inst, fake_net_info) @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance', create=True) def test__stop_firewall(self, mock_ui): fake_inst = 'fake-inst' fake_net_info = utils.get_test_network_info() self.driver._stop_firewall(fake_inst, fake_net_info) mock_ui.assert_called_once_with(fake_inst, fake_net_info) @mock.patch.object(cw.IronicClientWrapper, 'call') def test_instance_exists(self, mock_call): instance = fake_instance.fake_instance_obj(self.ctx, uuid=self.instance_uuid) self.assertTrue(self.driver.instance_exists(instance)) mock_call.assert_called_once_with('node.get_by_instance_uuid', self.instance_uuid, fields=ironic_driver._NODE_FIELDS) @mock.patch.object(cw.IronicClientWrapper, 'call') def test_instance_exists_fail(self, mock_call): mock_call.side_effect = ironic_exception.NotFound instance = fake_instance.fake_instance_obj(self.ctx, uuid=self.instance_uuid) self.assertFalse(self.driver.instance_exists(instance)) mock_call.assert_called_once_with('node.get_by_instance_uuid', self.instance_uuid, fields=ironic_driver._NODE_FIELDS) 
@mock.patch.object(cw.IronicClientWrapper, 'call') @mock.patch.object(objects.Instance, 'get_by_uuid') def test_list_instances(self, mock_inst_by_uuid, mock_call): nodes = [] instances = [] for i in range(2): uuid = uuidutils.generate_uuid() instances.append(fake_instance.fake_instance_obj(self.ctx, id=i, uuid=uuid)) nodes.append(ironic_utils.get_test_node(instance_uuid=uuid)) mock_inst_by_uuid.side_effect = instances mock_call.return_value = nodes response = self.driver.list_instances() mock_call.assert_called_with("node.list", associated=True, limit=0) expected_calls = [mock.call(mock.ANY, instances[0].uuid), mock.call(mock.ANY, instances[1].uuid)] mock_inst_by_uuid.assert_has_calls(expected_calls) self.assertEqual(['instance-00000000', 'instance-00000001'], sorted(response)) @mock.patch.object(cw.IronicClientWrapper, 'call') @mock.patch.object(objects.Instance, 'get_by_uuid') def test_list_instances_fail(self, mock_inst_by_uuid, mock_call): mock_call.side_effect = exception.NovaException response = self.driver.list_instances() mock_call.assert_called_with("node.list", associated=True, limit=0) self.assertFalse(mock_inst_by_uuid.called) self.assertThat(response, HasLength(0)) @mock.patch.object(cw.IronicClientWrapper, 'call') def test_list_instance_uuids(self, mock_call): num_nodes = 2 nodes = [] for n in range(num_nodes): nodes.append(ironic_utils.get_test_node( instance_uuid=uuidutils.generate_uuid())) mock_call.return_value = nodes uuids = self.driver.list_instance_uuids() mock_call.assert_called_with('node.list', associated=True, limit=0) expected = [n.instance_uuid for n in nodes] self.assertEqual(sorted(expected), sorted(uuids)) @mock.patch.object(FAKE_CLIENT.node, 'list') @mock.patch.object(FAKE_CLIENT.node, 'get') def test_node_is_available_empty_cache_empty_list(self, mock_get, mock_list): node = ironic_utils.get_test_node() mock_get.return_value = node mock_list.return_value = [] self.assertTrue(self.driver.node_is_available(node.uuid)) mock_get.assert_called_with(node.uuid, fields=ironic_driver._NODE_FIELDS) mock_list.assert_called_with(detail=True, limit=0) mock_get.side_effect = ironic_exception.NotFound self.assertFalse(self.driver.node_is_available(node.uuid)) @mock.patch.object(FAKE_CLIENT.node, 'list') @mock.patch.object(FAKE_CLIENT.node, 'get') def test_node_is_available_empty_cache(self, mock_get, mock_list): node = ironic_utils.get_test_node() mock_get.return_value = node mock_list.return_value = [node] self.assertTrue(self.driver.node_is_available(node.uuid)) mock_list.assert_called_with(detail=True, limit=0) self.assertEqual(0, mock_get.call_count) @mock.patch.object(FAKE_CLIENT.node, 'list') @mock.patch.object(FAKE_CLIENT.node, 'get') def test_node_is_available_with_cache(self, mock_get, mock_list): node = ironic_utils.get_test_node() mock_get.return_value = node mock_list.return_value = [node] # populate the cache self.driver.get_available_nodes(refresh=True) # prove that zero calls are made after populating cache mock_list.reset_mock() self.assertTrue(self.driver.node_is_available(node.uuid)) self.assertEqual(0, mock_list.call_count) self.assertEqual(0, mock_get.call_count) def test__node_resources_unavailable(self): node_dicts = [ # a node in maintenance /w no instance and power OFF {'uuid': uuidutils.generate_uuid(), 'maintenance': True, 'power_state': ironic_states.POWER_OFF, 'provision_state': ironic_states.AVAILABLE}, # a node in maintenance /w no instance and ERROR power state {'uuid': uuidutils.generate_uuid(), 'maintenance': True, 'power_state': 
ironic_states.ERROR, 'provision_state': ironic_states.AVAILABLE}, # a node not in maintenance /w no instance and bad power state {'uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.NOSTATE, 'provision_state': ironic_states.AVAILABLE}, # a node not in maintenance or bad power state, bad provision state {'uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.POWER_ON, 'provision_state': ironic_states.MANAGEABLE}, # a node in cleaning {'uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.POWER_ON, 'provision_state': ironic_states.CLEANING}, # a node in cleaning, waiting for a clean step to finish {'uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.POWER_ON, 'provision_state': ironic_states.CLEANWAIT}, # a node in deleting {'uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.POWER_ON, 'provision_state': ironic_states.DELETING}, # a node in deleted {'uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.POWER_ON, 'provision_state': ironic_states.DELETED}, # a node in AVAILABLE with an instance uuid {'uuid': uuidutils.generate_uuid(), 'instance_uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.POWER_OFF, 'provision_state': ironic_states.AVAILABLE} ] for n in node_dicts: node = ironic_utils.get_test_node(**n) self.assertTrue(self.driver._node_resources_unavailable(node)) for ok_state in (ironic_states.AVAILABLE, ironic_states.NOSTATE): # these are both ok and should present as available as they # have no instance_uuid avail_node = ironic_utils.get_test_node( power_state=ironic_states.POWER_OFF, provision_state=ok_state) unavailable = self.driver._node_resources_unavailable(avail_node) self.assertFalse(unavailable) def test__node_resources_used(self): node_dicts = [ # a node in maintenance /w instance and active {'uuid': uuidutils.generate_uuid(), 'instance_uuid': uuidutils.generate_uuid(), 'provision_state': ironic_states.ACTIVE}, ] for n in node_dicts: node = ironic_utils.get_test_node(**n) self.assertTrue(self.driver._node_resources_used(node)) unused_node = ironic_utils.get_test_node( instance_uuid=None, provision_state=ironic_states.AVAILABLE) self.assertFalse(self.driver._node_resources_used(unused_node)) @mock.patch.object(FAKE_CLIENT.node, 'list') def test_get_available_nodes(self, mock_list): node_dicts = [ # a node in maintenance /w no instance and power OFF {'uuid': uuidutils.generate_uuid(), 'maintenance': True, 'power_state': ironic_states.POWER_OFF}, # a node /w instance and power ON {'uuid': uuidutils.generate_uuid(), 'instance_uuid': self.instance_uuid, 'power_state': ironic_states.POWER_ON}, # a node not in maintenance /w no instance and bad power state {'uuid': uuidutils.generate_uuid(), 'power_state': ironic_states.ERROR}, ] nodes = [ironic_utils.get_test_node(**n) for n in node_dicts] mock_list.return_value = nodes available_nodes = self.driver.get_available_nodes() expected_uuids = [n['uuid'] for n in node_dicts] self.assertEqual(sorted(expected_uuids), sorted(available_nodes)) @mock.patch.object(FAKE_CLIENT.node, 'get') @mock.patch.object(FAKE_CLIENT.node, 'list') @mock.patch.object(ironic_driver.IronicDriver, '_node_resource') def test_get_available_resource(self, mock_nr, mock_list, mock_get): node = ironic_utils.get_test_node() node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid()) fake_resource = 'fake-resource' mock_get.return_value = node # ensure cache gets populated without the node we want mock_list.return_value = [node_2] mock_nr.return_value = fake_resource result = 
self.driver.get_available_resource(node.uuid) self.assertEqual(fake_resource, result) mock_nr.assert_called_once_with(node) mock_get.assert_called_once_with(node.uuid, fields=ironic_driver._NODE_FIELDS) @mock.patch.object(FAKE_CLIENT.node, 'get') @mock.patch.object(FAKE_CLIENT.node, 'list') @mock.patch.object(ironic_driver.IronicDriver, '_node_resource') def test_get_available_resource_with_cache(self, mock_nr, mock_list, mock_get): node = ironic_utils.get_test_node() fake_resource = 'fake-resource' mock_list.return_value = [node] mock_nr.return_value = fake_resource # populate the cache self.driver.get_available_nodes(refresh=True) mock_list.reset_mock() result = self.driver.get_available_resource(node.uuid) self.assertEqual(fake_resource, result) self.assertEqual(0, mock_list.call_count) self.assertEqual(0, mock_get.call_count) mock_nr.assert_called_once_with(node) @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid') def test_get_info(self, mock_gbiu): properties = {'memory_mb': 512, 'cpus': 2} power_state = ironic_states.POWER_ON node = ironic_utils.get_test_node(instance_uuid=self.instance_uuid, properties=properties, power_state=power_state) mock_gbiu.return_value = node # ironic_states.POWER_ON should be mapped to # nova_states.RUNNING memory_kib = properties['memory_mb'] * 1024 instance = fake_instance.fake_instance_obj('fake-context', uuid=self.instance_uuid) result = self.driver.get_info(instance) self.assertEqual(hardware.InstanceInfo(state=nova_states.RUNNING, max_mem_kb=memory_kib, mem_kb=memory_kib, num_cpu=properties['cpus']), result) @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid') def test_get_info_http_not_found(self, mock_gbiu): mock_gbiu.side_effect = ironic_exception.NotFound() instance = fake_instance.fake_instance_obj( self.ctx, uuid=uuidutils.generate_uuid()) result = self.driver.get_info(instance) self.assertEqual(hardware.InstanceInfo(state=nova_states.NOSTATE), result) @mock.patch.object(FAKE_CLIENT, 'node') def test_macs_for_instance(self, mock_node): node = ironic_utils.get_test_node() port = ironic_utils.get_test_port() mock_node.get.return_value = node mock_node.list_ports.return_value = [port] instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) result = self.driver.macs_for_instance(instance) self.assertEqual(set([port.address]), result) mock_node.list_ports.assert_called_once_with(node.uuid) @mock.patch.object(FAKE_CLIENT.node, 'get') def test_macs_for_instance_http_not_found(self, mock_get): mock_get.side_effect = ironic_exception.NotFound() instance = fake_instance.fake_instance_obj( self.ctx, node=uuidutils.generate_uuid()) result = self.driver.macs_for_instance(instance) self.assertIsNone(result) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active') @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') def _test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active, mock_node, mock_looping, mock_save): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) fake_flavor = objects.Flavor(ephemeral_gb=0) instance.flavor = fake_flavor mock_node.get.return_value = node 
mock_node.validate.return_value = ironic_utils.get_test_validation() mock_node.get_by_instance_uuid.return_value = node mock_node.set_provision_state.return_value = mock.MagicMock() fake_looping_call = FakeLoopingCall() mock_looping.return_value = fake_looping_call image_meta = ironic_utils.get_test_image_meta() self.driver.spawn(self.ctx, instance, image_meta, [], None) mock_node.get.assert_called_once_with( node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.validate.assert_called_once_with(node_uuid) mock_adf.assert_called_once_with(node, instance, test.MatchType(objects.ImageMeta), fake_flavor) mock_pvifs.assert_called_once_with(node, instance, None) mock_sf.assert_called_once_with(instance, None) mock_node.set_provision_state.assert_called_once_with(node_uuid, 'active', configdrive=mock.ANY) self.assertIsNone(instance.default_ephemeral_device) self.assertFalse(mock_save.called) mock_looping.assert_called_once_with(mock_wait_active, instance) fake_looping_call.start.assert_called_once_with( interval=CONF.ironic.api_retry_interval) fake_looping_call.wait.assert_called_once_with() @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive') @mock.patch.object(configdrive, 'required_by') def test_spawn(self, mock_required_by, mock_configdrive): mock_required_by.return_value = False self._test_spawn() # assert configdrive was not generated self.assertFalse(mock_configdrive.called) @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive') @mock.patch.object(configdrive, 'required_by') def test_spawn_with_configdrive(self, mock_required_by, mock_configdrive): mock_required_by.return_value = True self._test_spawn() # assert configdrive was generated mock_configdrive.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, extra_md={}, files=[]) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, 'destroy') @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active') @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active, mock_destroy, mock_node, mock_looping, mock_required_by): mock_required_by.return_value = False node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) fake_flavor = objects.Flavor(ephemeral_gb=0) instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = fake_flavor mock_node.get.return_value = node mock_node.validate.return_value = ironic_utils.get_test_validation() mock_node.get_by_instance_uuid.return_value = node mock_node.set_provision_state.return_value = mock.MagicMock() fake_looping_call = FakeLoopingCall() mock_looping.return_value = fake_looping_call deploy_exc = exception.InstanceDeployFailure('foo') fake_looping_call.wait.side_effect = deploy_exc self.assertRaises( exception.InstanceDeployFailure, self.driver.spawn, self.ctx, instance, None, [], None) self.assertEqual(0, mock_destroy.call_count) def _test_add_driver_fields(self, mock_update=None, mock_call=None): node = ironic_utils.get_test_node(driver='fake') instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) image_meta = ironic_utils.get_test_image_meta() flavor = ironic_utils.get_test_flavor() 
self.driver._add_driver_fields(node, instance, image_meta, flavor) expected_patch = [{'path': '/instance_info/image_source', 'op': 'add', 'value': image_meta.id}, {'path': '/instance_info/root_gb', 'op': 'add', 'value': str(instance.root_gb)}, {'path': '/instance_info/swap_mb', 'op': 'add', 'value': str(flavor['swap'])}, {'path': '/instance_info/display_name', 'value': instance.display_name, 'op': 'add'}, {'path': '/instance_info/vcpus', 'op': 'add', 'value': str(instance.vcpus)}, {'path': '/instance_info/memory_mb', 'op': 'add', 'value': str(instance.memory_mb)}, {'path': '/instance_info/local_gb', 'op': 'add', 'value': str(node.properties.get('local_gb', 0))}, {'path': '/instance_uuid', 'op': 'add', 'value': instance.uuid}] if mock_call is not None: # assert call() is invoked with retry_on_conflict False to # avoid bug #1341420 mock_call.assert_called_once_with('node.update', node.uuid, expected_patch, retry_on_conflict=False) if mock_update is not None: mock_update.assert_called_once_with(node.uuid, expected_patch) @mock.patch.object(FAKE_CLIENT.node, 'update') def test__add_driver_fields_mock_update(self, mock_update): self._test_add_driver_fields(mock_update=mock_update) @mock.patch.object(cw.IronicClientWrapper, 'call') def test__add_driver_fields_mock_call(self, mock_call): self._test_add_driver_fields(mock_call=mock_call) @mock.patch.object(FAKE_CLIENT.node, 'update') def test__add_driver_fields_fail(self, mock_update): mock_update.side_effect = ironic_exception.BadRequest() node = ironic_utils.get_test_node(driver='fake') instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) image_meta = ironic_utils.get_test_image_meta() flavor = ironic_utils.get_test_flavor() self.assertRaises(exception.InstanceDeployFailure, self.driver._add_driver_fields, node, instance, image_meta, flavor) def _test_remove_driver_fields(self, mock_update): node = ironic_utils.get_test_node(driver='fake') instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) self.driver._remove_driver_fields(node, instance) expected_patch = [{'path': '/instance_info', 'op': 'remove'}, {'path': '/instance_uuid', 'op': 'remove'}] mock_update.assert_called_once_with(node.uuid, expected_patch) @mock.patch.object(FAKE_CLIENT.node, 'update') def test_remove_driver_fields(self, mock_update): self._test_remove_driver_fields(mock_update) @mock.patch.object(FAKE_CLIENT.node, 'update') def test_remove_driver_fields_fail(self, mock_update): mock_update.side_effect = ironic_exception.BadRequest() self._test_remove_driver_fields(mock_update) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(FAKE_CLIENT, 'node') def test_spawn_node_driver_validation_fail(self, mock_node, mock_required_by): mock_required_by.return_value = False node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) flavor = ironic_utils.get_test_flavor() instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = flavor mock_node.validate.return_value = ironic_utils.get_test_validation( power=False, deploy=False) mock_node.get.return_value = node image_meta = ironic_utils.get_test_image_meta() self.assertRaises(exception.ValidationError, self.driver.spawn, self.ctx, instance, image_meta, [], None) mock_node.get.assert_called_once_with( node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.validate.assert_called_once_with(node_uuid) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(FAKE_CLIENT, 'node') 
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy') def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy, mock_pvifs, mock_sf, mock_node, mock_required_by): mock_required_by.return_value = False node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) flavor = ironic_utils.get_test_flavor() instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = flavor mock_node.get.return_value = node mock_node.validate.return_value = ironic_utils.get_test_validation() image_meta = ironic_utils.get_test_image_meta() class TestException(Exception): pass mock_sf.side_effect = TestException() self.assertRaises(TestException, self.driver.spawn, self.ctx, instance, image_meta, [], None) mock_node.get.assert_called_once_with( node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.validate.assert_called_once_with(node_uuid) mock_cleanup_deploy.assert_called_with(node, instance, None) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive') @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy') def test_spawn_node_configdrive_fail(self, mock_cleanup_deploy, mock_pvifs, mock_sf, mock_configdrive, mock_node, mock_save, mock_required_by): mock_required_by.return_value = True node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) flavor = ironic_utils.get_test_flavor() instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = flavor mock_node.get.return_value = node mock_node.validate.return_value = ironic_utils.get_test_validation() image_meta = ironic_utils.get_test_image_meta() class TestException(Exception): pass mock_configdrive.side_effect = TestException() self.assertRaises(TestException, self.driver.spawn, self.ctx, instance, image_meta, [], None) mock_node.get.assert_called_once_with( node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.validate.assert_called_once_with(node_uuid) mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None, flavor=flavor) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy') def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy, mock_pvifs, mock_sf, mock_node, mock_required_by): mock_required_by.return_value = False node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) flavor = ironic_utils.get_test_flavor() instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = flavor image_meta = ironic_utils.get_test_image_meta() mock_node.get.return_value = node mock_node.validate.return_value = ironic_utils.get_test_validation() mock_node.set_provision_state.side_effect = exception.NovaException() self.assertRaises(exception.NovaException, self.driver.spawn, self.ctx, instance, image_meta, [], None) mock_node.get.assert_called_once_with( 
node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.validate.assert_called_once_with(node_uuid) mock_cleanup_deploy.assert_called_once_with(node, instance, None) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy') def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy, mock_pvifs, mock_sf, mock_node, mock_required_by): mock_required_by.return_value = False node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) flavor = ironic_utils.get_test_flavor() instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = flavor image_meta = ironic_utils.get_test_image_meta() mock_node.get.return_value = node mock_node.validate.return_value = ironic_utils.get_test_validation() mock_node.set_provision_state.side_effect = ironic_exception.BadRequest self.assertRaises(ironic_exception.BadRequest, self.driver.spawn, self.ctx, instance, image_meta, [], None) mock_node.get.assert_called_once_with( node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.validate.assert_called_once_with(node_uuid) mock_cleanup_deploy.assert_called_once_with(node, instance, None) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, 'destroy') def test_spawn_node_trigger_deploy_fail3(self, mock_destroy, mock_pvifs, mock_sf, mock_node, mock_looping, mock_required_by): mock_required_by.return_value = False node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) flavor = ironic_utils.get_test_flavor() instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = flavor image_meta = ironic_utils.get_test_image_meta() mock_node.get.return_value = node mock_node.validate.return_value = ironic_utils.get_test_validation() fake_looping_call = FakeLoopingCall() mock_looping.return_value = fake_looping_call fake_looping_call.wait.side_effect = ironic_exception.BadRequest fake_net_info = utils.get_test_network_info() self.assertRaises(ironic_exception.BadRequest, self.driver.spawn, self.ctx, instance, image_meta, [], None, fake_net_info) self.assertEqual(0, mock_destroy.call_count) @mock.patch.object(configdrive, 'required_by') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall') def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs, mock_wait, mock_node, mock_save, mock_looping, mock_required_by): mock_required_by.return_value = False node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) flavor = ironic_utils.get_test_flavor(ephemeral_gb=1) instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) instance.flavor = flavor mock_node.get_by_instance_uuid.return_value = node 
mock_node.set_provision_state.return_value = mock.MagicMock() image_meta = ironic_utils.get_test_image_meta() self.driver.spawn(self.ctx, instance, image_meta, [], None) self.assertTrue(mock_save.called) self.assertEqual('/dev/sda1', instance.default_ephemeral_device) @mock.patch.object(FAKE_CLIENT, 'node') @mock.patch.object(ironic_driver.IronicDriver, '_remove_driver_fields') @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy') def _test_destroy(self, state, mock_cleanup_deploy, mock_remove_driver_fields, mock_node): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' network_info = 'foo' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid, provision_state=state) instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) def fake_set_provision_state(*_): node.provision_state = None mock_node.get_by_instance_uuid.return_value = node mock_node.set_provision_state.side_effect = fake_set_provision_state self.driver.destroy(self.ctx, instance, network_info, None) mock_node.get_by_instance_uuid.assert_called_with( instance.uuid, fields=ironic_driver._NODE_FIELDS) mock_cleanup_deploy.assert_called_with(node, instance, network_info) # For states where it makes sense, check that set_provision_state has # been called if state in ironic_driver._UNPROVISION_STATES: mock_node.set_provision_state.assert_called_once_with( node_uuid, 'deleted') self.assertFalse(mock_remove_driver_fields.called) else: self.assertFalse(mock_node.set_provision_state.called) mock_remove_driver_fields.assert_called_once_with(node, instance) def test_destroy(self): for state in ironic_states.PROVISION_STATE_LIST: self._test_destroy(state) @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid, provision_state=ironic_states.ACTIVE) fake_validate.return_value = node instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) mock_sps.side_effect = exception.NovaException() self.assertRaises(exception.NovaException, self.driver.destroy, self.ctx, instance, None, None) @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def _test__unprovision_instance(self, mock_validate_inst, mock_set_pstate, state=None): node = ironic_utils.get_test_node( driver='fake', provision_state=state) instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) mock_validate_inst.return_value = node self.driver._unprovision(instance, node) mock_validate_inst.assert_called_once_with(instance) mock_set_pstate.assert_called_once_with(node.uuid, "deleted") def test__unprovision_cleaning(self): self._test__unprovision_instance(state=ironic_states.CLEANING) def test__unprovision_cleanwait(self): self._test__unprovision_instance(state=ironic_states.CLEANWAIT) @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def test__unprovision_fail_max_retries(self, mock_validate_inst, mock_set_pstate): CONF.set_default('api_max_retries', default=2, group='ironic') node = ironic_utils.get_test_node( driver='fake', provision_state=ironic_states.ACTIVE) instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) mock_validate_inst.return_value = node
self.assertRaises(exception.NovaException, self.driver._unprovision, instance, node) expected_calls = (mock.call(instance), mock.call(instance)) mock_validate_inst.assert_has_calls(expected_calls) mock_set_pstate.assert_called_once_with(node.uuid, "deleted") @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') def test__unprovision_instance_not_found(self, mock_validate_inst, mock_set_pstate): node = ironic_utils.get_test_node( driver='fake', provision_state=ironic_states.DELETING) instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) mock_validate_inst.side_effect = exception.InstanceNotFound( instance_id='fake') self.driver._unprovision(instance, node) mock_validate_inst.assert_called_once_with(instance) mock_set_pstate.assert_called_once_with(node.uuid, "deleted") @mock.patch.object(FAKE_CLIENT, 'node') def test_destroy_unassociate_fail(self, mock_node): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid, provision_state=ironic_states.ACTIVE) instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) mock_node.get_by_instance_uuid.return_value = node mock_node.update.side_effect = exception.NovaException() self.assertRaises(exception.NovaException, self.driver.destroy, self.ctx, instance, None, None) mock_node.set_provision_state.assert_called_once_with(node_uuid, 'deleted') mock_node.get_by_instance_uuid.assert_called_with( instance.uuid, fields=ironic_driver._NODE_FIELDS) @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') @mock.patch.object(FAKE_CLIENT.node, 'set_power_state') def test_reboot(self, mock_sp, fake_validate, mock_looping): node = ironic_utils.get_test_node() fake_validate.side_effect = [node, node] fake_looping_call = FakeLoopingCall() mock_looping.return_value = fake_looping_call instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) self.driver.reboot(self.ctx, instance, None, None) mock_sp.assert_called_once_with(node.uuid, 'reboot') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') @mock.patch.object(FAKE_CLIENT.node, 'set_power_state') def test_power_off(self, mock_sp, fake_validate, mock_looping): self._test_power_on_off(mock_sp, fake_validate, mock_looping, method_name='power_off') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') @mock.patch.object(FAKE_CLIENT.node, 'set_power_state') def test_power_on(self, mock_sp, fake_validate, mock_looping): self._test_power_on_off(mock_sp, fake_validate, mock_looping, method_name='power_on') def _test_power_on_off(self, mock_sp, fake_validate, mock_looping, method_name=None): node = ironic_utils.get_test_node() fake_validate.side_effect = [node, node] fake_looping_call = FakeLoopingCall() mock_looping.return_value = fake_looping_call instance = fake_instance.fake_instance_obj(self.ctx, node=self.instance_uuid) # Call the method under test here if method_name == 'power_on': self.driver.power_on(self.ctx, instance, utils.get_test_network_info()) mock_sp.assert_called_once_with(node.uuid, 'on') elif method_name == 'power_off': self.driver.power_off(instance) mock_sp.assert_called_once_with(node.uuid, 'off') @mock.patch.object(FAKE_CLIENT.node, 'list_ports') 
@mock.patch.object(FAKE_CLIENT.port, 'update') @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs') def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid) # make the address be consistent with network_info's port = ironic_utils.get_test_port(address='fake') mock_lp.return_value = [port] instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) network_info = utils.get_test_network_info() port_id = six.text_type(network_info[0]['id']) expected_patch = [{'op': 'add', 'path': '/extra/vif_port_id', 'value': port_id}] self.driver._plug_vifs(node, instance, network_info) # asserts mock_uvifs.assert_called_once_with(node, instance, network_info) mock_lp.assert_called_once_with(node_uuid) mock_port_udt.assert_called_with(port.uuid, expected_patch) @mock.patch.object(FAKE_CLIENT.node, 'get') @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs') def test_plug_vifs(self, mock__plug_vifs, mock_get): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid) mock_get.return_value = node instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) network_info = utils.get_test_network_info() self.driver.plug_vifs(instance, network_info) mock_get.assert_called_once_with(node_uuid, fields=ironic_driver._NODE_FIELDS) mock__plug_vifs.assert_called_once_with(node, instance, network_info) @mock.patch.object(FAKE_CLIENT.port, 'update') @mock.patch.object(FAKE_CLIENT.node, 'list_ports') @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs') def test_plug_vifs_multiple_ports(self, mock_uvifs, mock_lp, mock_port_udt): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid) first_ironic_port_uuid = 'aaaaaaaa-bbbb-1111-dddd-eeeeeeeeeeee' first_port = ironic_utils.get_test_port(uuid=first_ironic_port_uuid, node_uuid=node_uuid, address='11:FF:FF:FF:FF:FF') second_ironic_port_uuid = 'aaaaaaaa-bbbb-2222-dddd-eeeeeeeeeeee' second_port = ironic_utils.get_test_port(uuid=second_ironic_port_uuid, node_uuid=node_uuid, address='22:FF:FF:FF:FF:FF') mock_lp.return_value = [second_port, first_port] instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) first_vif_id = 'aaaaaaaa-vv11-cccc-dddd-eeeeeeeeeeee' second_vif_id = 'aaaaaaaa-vv22-cccc-dddd-eeeeeeeeeeee' first_vif = ironic_utils.get_test_vif( address='22:FF:FF:FF:FF:FF', id=second_vif_id) second_vif = ironic_utils.get_test_vif( address='11:FF:FF:FF:FF:FF', id=first_vif_id) network_info = [first_vif, second_vif] self.driver._plug_vifs(node, instance, network_info) # asserts mock_uvifs.assert_called_once_with(node, instance, network_info) mock_lp.assert_called_once_with(node_uuid) calls = (mock.call(first_ironic_port_uuid, [{'op': 'add', 'path': '/extra/vif_port_id', 'value': first_vif_id}]), mock.call(second_ironic_port_uuid, [{'op': 'add', 'path': '/extra/vif_port_id', 'value': second_vif_id}])) mock_port_udt.assert_has_calls(calls, any_order=True) @mock.patch.object(FAKE_CLIENT.port, 'update') @mock.patch.object(FAKE_CLIENT.node, 'list_ports') @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs') def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp, mock_port_udt): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid) port = ironic_utils.get_test_port() mock_lp.return_value = [port] instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) # 
len(network_info) > len(ports) network_info = (utils.get_test_network_info() + utils.get_test_network_info()) self.assertRaises(exception.NovaException, self.driver._plug_vifs, node, instance, network_info) # asserts mock_uvifs.assert_called_once_with(node, instance, network_info) mock_lp.assert_called_once_with(node_uuid) # assert port.update() was not called self.assertFalse(mock_port_udt.called) @mock.patch.object(FAKE_CLIENT.port, 'update') @mock.patch.object(FAKE_CLIENT.node, 'list_ports') @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs') def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp, mock_port_udt): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid) port = ironic_utils.get_test_port() mock_lp.return_value = [port] instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) network_info = [] self.driver._plug_vifs(node, instance, network_info) # asserts mock_uvifs.assert_called_once_with(node, instance, network_info) mock_lp.assert_called_once_with(node_uuid) # assert port.update() was not called self.assertFalse(mock_port_udt.called) @mock.patch.object(FAKE_CLIENT.port, 'update') @mock.patch.object(FAKE_CLIENT, 'node') def test_unplug_vifs(self, mock_node, mock_update): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid) port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'}) mock_node.get.return_value = node mock_node.list_ports.return_value = [port] instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) expected_patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}] self.driver.unplug_vifs(instance, utils.get_test_network_info()) # asserts mock_node.get.assert_called_once_with( node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.list_ports.assert_called_once_with(node_uuid, detail=True) mock_update.assert_called_once_with(port.uuid, expected_patch) @mock.patch.object(FAKE_CLIENT.port, 'update') @mock.patch.object(FAKE_CLIENT, 'node') def test_unplug_vifs_port_not_associated(self, mock_node, mock_update): node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' node = ironic_utils.get_test_node(uuid=node_uuid) port = ironic_utils.get_test_port(extra={}) mock_node.get.return_value = node mock_node.list_ports.return_value = [port] instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) self.driver.unplug_vifs(instance, utils.get_test_network_info()) mock_node.get.assert_called_once_with( node_uuid, fields=ironic_driver._NODE_FIELDS) mock_node.list_ports.assert_called_once_with(node_uuid, detail=True) # assert port.update() was not called self.assertFalse(mock_update.called) @mock.patch.object(FAKE_CLIENT.port, 'update') def test_unplug_vifs_no_network_info(self, mock_update): instance = fake_instance.fake_instance_obj(self.ctx) network_info = [] self.driver.unplug_vifs(instance, network_info) # assert port.update() was not called self.assertFalse(mock_update.called) @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance', create=True) def test_unfilter_instance(self, mock_ui): instance = fake_instance.fake_instance_obj(self.ctx) network_info = utils.get_test_network_info() self.driver.unfilter_instance(instance, network_info) mock_ui.assert_called_once_with(instance, network_info) @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering', create=True) @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter', create=True) def 
test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf): instance = fake_instance.fake_instance_obj(self.ctx) network_info = utils.get_test_network_info() self.driver.ensure_filtering_rules_for_instance(instance, network_info) mock_sbf.assert_called_once_with(instance, network_info) mock_pif.assert_called_once_with(instance, network_info) @mock.patch.object(firewall.NoopFirewallDriver, 'refresh_instance_security_rules', create=True) def test_refresh_instance_security_rules(self, mock_risr): instance = fake_instance.fake_instance_obj(self.ctx) self.driver.refresh_instance_security_rules(instance) mock_risr.assert_called_once_with(instance) @mock.patch.object(firewall.NoopFirewallDriver, 'refresh_instance_security_rules', create=True) def test_refresh_security_group_rules(self, mock_risr): fake_group = 'fake-security-group-members' self.driver.refresh_instance_security_rules(fake_group) mock_risr.assert_called_once_with(fake_group) @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields') @mock.patch.object(FAKE_CLIENT.node, 'get') @mock.patch.object(objects.Instance, 'save') def _test_rebuild(self, mock_save, mock_get, mock_driver_fields, mock_set_pstate, mock_looping, mock_wait_active, preserve=False): node_uuid = uuidutils.generate_uuid() node = ironic_utils.get_test_node(uuid=node_uuid, instance_uuid=self.instance_uuid, instance_type_id=5) mock_get.return_value = node image_meta = ironic_utils.get_test_image_meta() flavor_id = 5 flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal') instance = fake_instance.fake_instance_obj(self.ctx, uuid=self.instance_uuid, node=node_uuid, instance_type_id=flavor_id) instance.flavor = flavor fake_looping_call = FakeLoopingCall() mock_looping.return_value = fake_looping_call self.driver.rebuild( context=self.ctx, instance=instance, image_meta=image_meta, injected_files=None, admin_password=None, bdms=None, detach_block_devices=None, attach_block_devices=None, preserve_ephemeral=preserve) mock_save.assert_called_once_with( expected_task_state=[task_states.REBUILDING]) mock_driver_fields.assert_called_once_with( node, instance, test.MatchType(objects.ImageMeta), flavor, preserve) mock_set_pstate.assert_called_once_with(node_uuid, ironic_states.REBUILD) mock_looping.assert_called_once_with(mock_wait_active, instance) fake_looping_call.start.assert_called_once_with( interval=CONF.ironic.api_retry_interval) fake_looping_call.wait.assert_called_once_with() def test_rebuild_preserve_ephemeral(self): self._test_rebuild(preserve=True) def test_rebuild_no_preserve_ephemeral(self): self._test_rebuild(preserve=False) @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields') @mock.patch.object(FAKE_CLIENT.node, 'get') @mock.patch.object(objects.Instance, 'save') def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields, mock_set_pstate): node_uuid = uuidutils.generate_uuid() node = ironic_utils.get_test_node(uuid=node_uuid, instance_uuid=self.instance_uuid, instance_type_id=5) mock_get.return_value = node image_meta = ironic_utils.get_test_image_meta() flavor_id = 5 flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal') instance = fake_instance.fake_instance_obj(self.ctx, uuid=self.instance_uuid, node=node_uuid, instance_type_id=flavor_id) instance.flavor = 
flavor exceptions = [ exception.NovaException(), ironic_exception.BadRequest(), ironic_exception.InternalServerError(), ] for e in exceptions: mock_set_pstate.side_effect = e self.assertRaises(exception.InstanceDeployFailure, self.driver.rebuild, context=self.ctx, instance=instance, image_meta=image_meta, injected_files=None, admin_password=None, bdms=None, detach_block_devices=None, attach_block_devices=None) @mock.patch.object(FAKE_CLIENT.node, 'get') def _test_network_binding_host_id(self, is_neutron, mock_get): node_uuid = uuidutils.generate_uuid() hostname = 'ironic-compute' instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid, host=hostname) if is_neutron: provider = 'neutron' expected = None else: provider = 'none' expected = hostname node = ironic_utils.get_test_node(uuid=node_uuid, instance_uuid=self.instance_uuid, instance_type_id=5, network_provider=provider) mock_get.return_value = node host_id = self.driver.network_binding_host_id(self.ctx, instance) self.assertEqual(expected, host_id) def test_network_binding_host_id_neutron(self): self._test_network_binding_host_id(True) def test_network_binding_host_id_none(self): self._test_network_binding_host_id(False) @mock.patch.object(instance_metadata, 'InstanceMetadata') @mock.patch.object(configdrive, 'ConfigDriveBuilder') class IronicDriverGenerateConfigDriveTestCase(test.NoDBTestCase): @mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER) def setUp(self): super(IronicDriverGenerateConfigDriveTestCase, self).setUp() self.flags(**IRONIC_FLAGS) self.driver = ironic_driver.IronicDriver(None) self.driver.virtapi = fake.FakeVirtAPI() self.ctx = nova_context.get_admin_context() node_uuid = uuidutils.generate_uuid() self.node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid) self.instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid) self.network_info = utils.get_test_network_info() def test_generate_configdrive(self, mock_cd_builder, mock_instance_meta): mock_instance_meta.return_value = 'fake-instance' mock_make_drive = mock.MagicMock(make_drive=lambda *_: None) mock_cd_builder.return_value.__enter__.return_value = mock_make_drive self.driver._generate_configdrive(self.instance, self.node, self.network_info) mock_cd_builder.assert_called_once_with(instance_md='fake-instance') mock_instance_meta.assert_called_once_with(self.instance, network_info=self.network_info, extra_md={}, content=None) def test_generate_configdrive_fail(self, mock_cd_builder, mock_instance_meta): mock_cd_builder.side_effect = exception.ConfigDriveMountFailed( operation='foo', error='error') mock_instance_meta.return_value = 'fake-instance' mock_make_drive = mock.MagicMock(make_drive=lambda *_: None) mock_cd_builder.return_value.__enter__.return_value = mock_make_drive self.assertRaises(exception.ConfigDriveMountFailed, self.driver._generate_configdrive, self.instance, self.node, self.network_info) mock_cd_builder.assert_called_once_with(instance_md='fake-instance') mock_instance_meta.assert_called_once_with(self.instance, network_info=self.network_info, extra_md={}, content=None) nova-13.1.4/nova/tests/unit/virt/ironic/utils.py0000664000567000056710000001153413064447152022763 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import objects from nova.virt.ironic import ironic_states def get_test_validation(**kw): return type('interfaces', (object,), {'power': kw.get('power', True), 'deploy': kw.get('deploy', True), 'console': kw.get('console', True), 'rescue': kw.get('rescue', True)})() def get_test_node(**kw): return type('node', (object,), {'uuid': kw.get('uuid', 'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa'), 'chassis_uuid': kw.get('chassis_uuid'), 'power_state': kw.get('power_state', ironic_states.NOSTATE), 'target_power_state': kw.get('target_power_state', ironic_states.NOSTATE), 'provision_state': kw.get('provision_state', ironic_states.NOSTATE), 'target_provision_state': kw.get('target_provision_state', ironic_states.NOSTATE), 'last_error': kw.get('last_error'), 'instance_uuid': kw.get('instance_uuid'), 'instance_info': kw.get('instance_info'), 'driver': kw.get('driver', 'fake'), 'driver_info': kw.get('driver_info', {}), 'properties': kw.get('properties', {}), 'reservation': kw.get('reservation'), 'maintenance': kw.get('maintenance', False), 'network_provider': kw.get('network_provider'), 'extra': kw.get('extra', {}), 'updated_at': kw.get('created_at'), 'created_at': kw.get('updated_at')})() def get_test_port(**kw): return type('port', (object,), {'uuid': kw.get('uuid', 'gggggggg-uuuu-qqqq-ffff-llllllllllll'), 'node_uuid': kw.get('node_uuid', get_test_node().uuid), 'address': kw.get('address', 'FF:FF:FF:FF:FF:FF'), 'extra': kw.get('extra', {}), 'created_at': kw.get('created_at'), 'updated_at': kw.get('updated_at')})() def get_test_vif(**kw): return { 'profile': kw.get('profile', {}), 'ovs_interfaceid': kw.get('ovs_interfaceid'), 'preserve_on_delete': kw.get('preserve_on_delete', False), 'network': kw.get('network', {}), 'devname': kw.get('devname', 'tapaaaaaaaa-00'), 'vnic_type': kw.get('vnic_type', 'baremetal'), 'qbh_params': kw.get('qbh_params'), 'meta': kw.get('meta', {}), 'details': kw.get('details', {}), 'address': kw.get('address', 'FF:FF:FF:FF:FF:FF'), 'active': kw.get('active', True), 'type': kw.get('type', 'ironic'), 'id': kw.get('id', 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'), 'qbg_params': kw.get('qbg_params')} def get_test_flavor(**kw): default_extra_specs = {'baremetal:deploy_kernel_id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'baremetal:deploy_ramdisk_id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'} flavor = {'name': kw.get('name', 'fake.flavor'), 'extra_specs': kw.get('extra_specs', default_extra_specs), 'swap': kw.get('swap', 0), 'ephemeral_gb': kw.get('ephemeral_gb', 0)} return objects.Flavor(**flavor) def get_test_image_meta(**kw): return objects.ImageMeta.from_dict( {'id': kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')}) class FakePortClient(object): def get(self, port_uuid): pass def update(self, port_uuid, patch): pass class FakeNodeClient(object): def list(self, detail=False): return [] def get(self, node_uuid, fields=None): pass def get_by_instance_uuid(self, instance_uuid, fields=None): pass def list_ports(self, node_uuid): pass def set_power_state(self, node_uuid, target): pass def set_provision_state(self, node_uuid, target): pass def update(self, node_uuid, patch): pass def 
validate(self, node_uuid): pass class FakeClient(object): node = FakeNodeClient() port = FakePortClient() nova-13.1.4/nova/tests/unit/virt/ironic/__init__.py0000664000567000056710000000000013064447140023341 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/ironic/test_client_wrapper.py0000664000567000056710000002035113064447152025675 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ironicclient import client as ironic_client from ironicclient import exc as ironic_exception import mock from oslo_config import cfg from nova import exception from nova import test from nova.tests.unit.virt.ironic import utils as ironic_utils from nova.virt.ironic import client_wrapper CONF = cfg.CONF FAKE_CLIENT = ironic_utils.FakeClient() def get_new_fake_client(*args, **kwargs): return ironic_utils.FakeClient() class IronicClientWrapperTestCase(test.NoDBTestCase): def setUp(self): super(IronicClientWrapperTestCase, self).setUp() self.ironicclient = client_wrapper.IronicClientWrapper() # Do not waste time sleeping cfg.CONF.set_override('api_retry_interval', 0, 'ironic') @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr') @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client') def test_call_good_no_args(self, mock_get_client, mock_multi_getattr): mock_get_client.return_value = FAKE_CLIENT self.ironicclient.call("node.list") mock_get_client.assert_called_once_with(retry_on_conflict=True) mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list") mock_multi_getattr.return_value.assert_called_once_with() @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr') @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client') def test_call_good_with_args(self, mock_get_client, mock_multi_getattr): mock_get_client.return_value = FAKE_CLIENT self.ironicclient.call("node.list", 'test', associated=True) mock_get_client.assert_called_once_with(retry_on_conflict=True) mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list") mock_multi_getattr.return_value.assert_called_once_with( 'test', associated=True) @mock.patch.object(ironic_client, 'get_client') def test__get_client_no_auth_token(self, mock_ir_cli): self.flags(admin_auth_token=None, group='ironic') ironicclient = client_wrapper.IronicClientWrapper() # dummy call to have _get_client() called ironicclient.call("node.list") expected = {'os_username': CONF.ironic.admin_username, 'os_password': CONF.ironic.admin_password, 'os_auth_url': CONF.ironic.admin_url, 'os_tenant_name': CONF.ironic.admin_tenant_name, 'os_service_type': 'baremetal', 'os_endpoint_type': 'public', 'ironic_url': CONF.ironic.api_endpoint, 'max_retries': CONF.ironic.api_max_retries, 'retry_interval': CONF.ironic.api_retry_interval, 'os_ironic_api_version': '1.8'} mock_ir_cli.assert_called_once_with(CONF.ironic.api_version, **expected) @mock.patch.object(ironic_client, 'get_client') def test__get_client_with_auth_token(self, 
mock_ir_cli): self.flags(admin_auth_token='fake-token', group='ironic') ironicclient = client_wrapper.IronicClientWrapper() # dummy call to have _get_client() called ironicclient.call("node.list") expected = {'os_auth_token': 'fake-token', 'ironic_url': CONF.ironic.api_endpoint, 'max_retries': CONF.ironic.api_max_retries, 'retry_interval': CONF.ironic.api_retry_interval, 'os_ironic_api_version': '1.8'} mock_ir_cli.assert_called_once_with(CONF.ironic.api_version, **expected) @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr') @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client') def test_call_fail(self, mock_get_client, mock_multi_getattr): cfg.CONF.set_override('api_max_retries', 2, 'ironic') test_obj = mock.Mock() test_obj.side_effect = ironic_exception.HTTPServiceUnavailable mock_multi_getattr.return_value = test_obj mock_get_client.return_value = FAKE_CLIENT self.assertRaises(exception.NovaException, self.ironicclient.call, "node.list") self.assertEqual(3, test_obj.call_count) @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr') @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client') def test_call_with_api_max_retries_neg_conf_val(self, mock_get_client, mock_multi_getattr): cfg.CONF.set_default('api_max_retries', -1, 'ironic') test_obj = mock.Mock() test_obj.side_effect = ironic_exception.HTTPServiceUnavailable mock_multi_getattr.return_value = test_obj mock_get_client.return_value = FAKE_CLIENT self.assertRaises(exception.NovaException, self.ironicclient.call, "node.list") self.assertEqual(1, test_obj.call_count) @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr') @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client') def test_call_fail_unexpected_exception(self, mock_get_client, mock_multi_getattr): test_obj = mock.Mock() test_obj.side_effect = ironic_exception.HTTPNotFound mock_multi_getattr.return_value = test_obj mock_get_client.return_value = FAKE_CLIENT self.assertRaises(ironic_exception.HTTPNotFound, self.ironicclient.call, "node.list") @mock.patch.object(ironic_client, 'get_client') def test__get_client_unauthorized(self, mock_get_client): mock_get_client.side_effect = ironic_exception.Unauthorized self.assertRaises(exception.NovaException, self.ironicclient._get_client) @mock.patch.object(ironic_client, 'get_client') def test__get_client_unexpected_exception(self, mock_get_client): mock_get_client.side_effect = ironic_exception.ConnectionRefused self.assertRaises(ironic_exception.ConnectionRefused, self.ironicclient._get_client) def test__multi_getattr_good(self): response = self.ironicclient._multi_getattr(FAKE_CLIENT, "node.list") self.assertEqual(FAKE_CLIENT.node.list, response) def test__multi_getattr_fail(self): self.assertRaises(AttributeError, self.ironicclient._multi_getattr, FAKE_CLIENT, "nonexistent") @mock.patch.object(ironic_client, 'get_client') def test__client_is_cached(self, mock_get_client): mock_get_client.side_effect = get_new_fake_client ironicclient = client_wrapper.IronicClientWrapper() first_client = ironicclient._get_client() second_client = ironicclient._get_client() self.assertEqual(id(first_client), id(second_client)) @mock.patch.object(ironic_client, 'get_client') def test__invalidate_cached_client(self, mock_get_client): mock_get_client.side_effect = get_new_fake_client ironicclient = client_wrapper.IronicClientWrapper() first_client = ironicclient._get_client() ironicclient._invalidate_cached_client() second_client = 
ironicclient._get_client() self.assertNotEqual(id(first_client), id(second_client)) @mock.patch.object(ironic_client, 'get_client') def test_call_uses_cached_client(self, mock_get_client): mock_get_client.side_effect = get_new_fake_client ironicclient = client_wrapper.IronicClientWrapper() for n in range(0, 4): ironicclient.call("node.list") self.assertEqual(1, mock_get_client.call_count) nova-13.1.4/nova/tests/unit/virt/ironic/test_patcher.py0000664000567000056710000001342113064447152024305 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova import context as nova_context from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.virt.ironic import utils as ironic_utils from nova.virt.ironic import patcher CONF = cfg.CONF class IronicDriverFieldsTestCase(test.NoDBTestCase): def setUp(self): super(IronicDriverFieldsTestCase, self).setUp() self.image_meta = ironic_utils.get_test_image_meta() self.flavor = ironic_utils.get_test_flavor() self.ctx = nova_context.get_admin_context() self.instance = fake_instance.fake_instance_obj(self.ctx) self.node = ironic_utils.get_test_node(driver='fake') # Generic expected patches self._expected_deploy_patch = [ {'path': '/instance_info/image_source', 'value': self.image_meta.id, 'op': 'add'}, {'path': '/instance_info/root_gb', 'value': str(self.instance['root_gb']), 'op': 'add'}, {'path': '/instance_info/swap_mb', 'value': str(self.flavor['swap']), 'op': 'add'}, {'path': '/instance_info/display_name', 'value': self.instance['display_name'], 'op': 'add'}, {'path': '/instance_info/vcpus', 'value': str(self.instance['vcpus']), 'op': 'add'}, {'path': '/instance_info/memory_mb', 'value': str(self.instance['memory_mb']), 'op': 'add'}, {'path': '/instance_info/local_gb', 'value': str(self.node.properties.get('local_gb', 0)), 'op': 'add'} ] def test_create_generic(self): node = ironic_utils.get_test_node(driver='pxe_fake') patcher_obj = patcher.create(node) self.assertIsInstance(patcher_obj, patcher.GenericDriverFields) def test_generic_get_deploy_patch(self): node = ironic_utils.get_test_node(driver='fake') patch = patcher.create(node).get_deploy_patch( self.instance, self.image_meta, self.flavor) self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch)) def test_generic_get_deploy_patch_capabilities(self): node = ironic_utils.get_test_node(driver='fake') self.flavor['extra_specs']['capabilities:boot_mode'] = 'bios' expected = [{'path': '/instance_info/capabilities', 'value': '{"boot_mode": "bios"}', 'op': 'add'}] expected += self._expected_deploy_patch patch = patcher.create(node).get_deploy_patch( self.instance, self.image_meta, self.flavor) self.assertEqual(sorted(expected), sorted(patch)) def test_generic_get_deploy_patch_capabilities_op(self): node = ironic_utils.get_test_node(driver='fake') self.flavor['extra_specs']['capabilities:boot_mode'] = ' bios' expected = [{'path': '/instance_info/capabilities', 'value': 
'{"boot_mode": " bios"}', 'op': 'add'}] expected += self._expected_deploy_patch patch = patcher.create(node).get_deploy_patch( self.instance, self.image_meta, self.flavor) self.assertEqual(sorted(expected), sorted(patch)) def test_generic_get_deploy_patch_capabilities_nested_key(self): node = ironic_utils.get_test_node(driver='fake') self.flavor['extra_specs']['capabilities:key1:key2'] = ' bios' expected = [{'path': '/instance_info/capabilities', 'value': '{"key1:key2": " bios"}', 'op': 'add'}] expected += self._expected_deploy_patch patch = patcher.create(node).get_deploy_patch( self.instance, self.image_meta, self.flavor) self.assertEqual(sorted(expected), sorted(patch)) def test_generic_get_deploy_patch_ephemeral(self): CONF.set_override('default_ephemeral_format', 'testfmt') node = ironic_utils.get_test_node(driver='fake') instance = fake_instance.fake_instance_obj(self.ctx, ephemeral_gb=10) patch = patcher.create(node).get_deploy_patch( instance, self.image_meta, self.flavor) expected = [{'path': '/instance_info/ephemeral_gb', 'value': str(instance.ephemeral_gb), 'op': 'add'}, {'path': '/instance_info/ephemeral_format', 'value': 'testfmt', 'op': 'add'}] expected += self._expected_deploy_patch self.assertEqual(sorted(expected), sorted(patch)) def test_generic_get_deploy_patch_preserve_ephemeral(self): node = ironic_utils.get_test_node(driver='fake') for preserve in [True, False]: patch = patcher.create(node).get_deploy_patch( self.instance, self.image_meta, self.flavor, preserve_ephemeral=preserve) expected = [{'path': '/instance_info/preserve_ephemeral', 'value': str(preserve), 'op': 'add', }] expected += self._expected_deploy_patch self.assertEqual(sorted(expected), sorted(patch)) nova-13.1.4/nova/tests/unit/virt/test_osinfo.py0000664000567000056710000000656113064447141022676 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
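# NOTE: the osinfo tests below monkeypatch nova.virt.osinfo.libosinfo with # the in-tree fakelibosinfo fixture, so the lookup paths are exercised # without a real libosinfo (gi.repository.Libosinfo) installation.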
import fixtures import mock from nova import exception from nova import objects from nova import test from nova.tests.unit.virt import fakelibosinfo from nova.virt import osinfo class LibvirtOsInfoTest(test.NoDBTestCase): def setUp(self): super(LibvirtOsInfoTest, self).setUp() image_meta = {'properties': {'os_distro': 'fedora22', 'hw_disk_bus': 'ide', 'hw_vif_model': 'rtl8139'} } self.img_meta = objects.ImageMeta.from_dict(image_meta) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.osinfo.libosinfo', fakelibosinfo)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.osinfo._OsInfoDatabase._instance', None)) def test_get_os(self): os_info_db = osinfo._OsInfoDatabase.get_instance() os_name = os_info_db.get_os('fedora22').get_name() self.assertEqual('Fedora 22', os_name) def test_get_os_fails(self): os_info_db = osinfo._OsInfoDatabase.get_instance() self.assertRaises(exception.OsInfoNotFound, os_info_db.get_os, 'test33') def test_module_load_failed(self): self.useFixture(fixtures.MonkeyPatch( 'nova.virt.osinfo.libosinfo', None)) with test.nested( mock.patch.object(osinfo.importutils, 'import_module', side_effect=ImportError('gi.repository.Libosinfo')), mock.patch.object(osinfo.LOG, 'info')) as (mock_import, mock_log): os_info_db = osinfo._OsInfoDatabase.get_instance() self.assertIsNone(os_info_db.get_os('fedora22')) os_info_db = osinfo._OsInfoDatabase.get_instance() self.assertIsNone(os_info_db.get_os('fedora19')) self.assertEqual(1, mock_log.call_count) def test_hardware_properties_from_osinfo(self): """Verifies that HardwareProperties attributes are being set from libosinfo. """ img_meta = {'properties': {'os_distro': 'fedora22'} } img_meta = objects.ImageMeta.from_dict(img_meta) osinfo_obj = osinfo.HardwareProperties(img_meta) self.assertEqual('virtio', osinfo_obj.network_model) self.assertEqual('virtio', osinfo_obj.disk_model) def test_hardware_properties_from_meta(self): """Verifies that HardwareProperties attributes are being set from image properties. """ with mock.patch.object(osinfo._OsInfoDatabase, 'get_instance'): osinfo_obj = osinfo.HardwareProperties(self.img_meta) self.assertEqual('rtl8139', osinfo_obj.network_model) self.assertEqual('ide', osinfo_obj.disk_model) nova-13.1.4/nova/tests/unit/virt/test_driver.py0000664000567000056710000000525513064447152022675 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Citrix Systems, Inc. # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
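# Editor's note (refers to the osinfo tests above, not to the test_driver.py
# module that follows): a minimal sketch of the precedence those tests verify.
# Explicit image properties (hw_disk_bus / hw_vif_model) win; otherwise the
# models looked up from libosinfo for the given os_distro are used, which is
# why the tests expect 'rtl8139'/'ide' in one case and 'virtio' in the other.
# The helper and the osinfo_defaults mapping are hypothetical illustrations.
def pick_hw_models(image_props, osinfo_defaults):
    network_model = image_props.get('hw_vif_model',
                                    osinfo_defaults.get('network_model'))
    disk_model = image_props.get('hw_disk_bus',
                                 osinfo_defaults.get('disk_model'))
    return network_model, disk_model


# With only an os_distro set, the libosinfo defaults apply ('virtio' in the
# tests); with explicit properties, 'rtl8139' and 'ide' are returned instead.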
from oslo_config import fixture as fixture_config from nova import test from nova.virt import driver class FakeDriver(object): def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs class FakeDriver2(FakeDriver): pass class ToDriverRegistryTestCase(test.NoDBTestCase): def assertDriverInstance(self, inst, class_, *args, **kwargs): self.assertEqual(class_, inst.__class__) self.assertEqual(args, inst.args) self.assertEqual(kwargs, inst.kwargs) def test_driver_dict_from_config(self): drvs = driver.driver_dict_from_config( [ 'key1=nova.tests.unit.virt.test_driver.FakeDriver', 'key2=nova.tests.unit.virt.test_driver.FakeDriver2', ], 'arg1', 'arg2', param1='value1', param2='value2' ) self.assertEqual( sorted(['key1', 'key2']), sorted(drvs.keys()) ) self.assertDriverInstance( drvs['key1'], FakeDriver, 'arg1', 'arg2', param1='value1', param2='value2') self.assertDriverInstance( drvs['key2'], FakeDriver2, 'arg1', 'arg2', param1='value1', param2='value2') class DriverMethodTestCase(test.NoDBTestCase): def setUp(self): super(DriverMethodTestCase, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_is_xenapi_true(self): self.CONF.set_override('compute_driver', 'xenapi.XenAPIDriver', enforce_type=True) self.assertTrue(driver.is_xenapi()) def test_is_xenapi_false(self): driver_names = ('libvirt.LibvirtDriver', 'fake.FakeDriver', 'ironic.IronicDriver', 'vmwareapi.VMwareVCDriver', 'hyperv.HyperVDriver', None) for driver_name in driver_names: self.CONF.set_override('compute_driver', driver_name, enforce_type=True) self.assertFalse(driver.is_xenapi()) nova-13.1.4/nova/tests/unit/virt/hyperv/0000775000567000056710000000000013064447471021303 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/hyperv/test_base.py0000664000567000056710000000260013064447140023615 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from os_win import utilsfactory from six.moves import builtins from nova import test class HyperVBaseTestCase(test.NoDBTestCase): def setUp(self): super(HyperVBaseTestCase, self).setUp() self._mock_wmi = mock.MagicMock() wmi_patcher = mock.patch.object(builtins, 'wmi', create=True, new=self._mock_wmi) platform_patcher = mock.patch('sys.platform', 'win32') utilsfactory_patcher = mock.patch.object(utilsfactory, '_get_class') platform_patcher.start() wmi_patcher.start() utilsfactory_patcher.start() self.addCleanup(wmi_patcher.stop) self.addCleanup(platform_patcher.stop) self.addCleanup(utilsfactory_patcher.stop) nova-13.1.4/nova/tests/unit/virt/hyperv/test_pathutils.py0000664000567000056710000000605513064447152024733 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from six.moves import builtins from nova import exception from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import constants from nova.virt.hyperv import pathutils class PathUtilsTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V PathUtils class.""" def setUp(self): super(PathUtilsTestCase, self).setUp() self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir') self.fake_instance_name = 'fake_instance_name' self._pathutils = pathutils.PathUtils() def _mock_lookup_configdrive_path(self, ext): self._pathutils.get_instance_dir = mock.MagicMock( return_value=self.fake_instance_dir) def mock_exists(*args, **kwargs): path = args[0] return True if path[(path.rfind('.') + 1):] == ext else False self._pathutils.exists = mock_exists configdrive_path = self._pathutils.lookup_configdrive_path( self.fake_instance_name) return configdrive_path def test_lookup_configdrive_path(self): for format_ext in constants.DISK_FORMAT_MAP: configdrive_path = self._mock_lookup_configdrive_path(format_ext) fake_path = os.path.join(self.fake_instance_dir, 'configdrive.' + format_ext) self.assertEqual(configdrive_path, fake_path) def test_lookup_configdrive_path_non_exist(self): self._pathutils.get_instance_dir = mock.MagicMock( return_value=self.fake_instance_dir) self._pathutils.exists = mock.MagicMock(return_value=False) configdrive_path = self._pathutils.lookup_configdrive_path( self.fake_instance_name) self.assertIsNone(configdrive_path) @mock.patch('os.path.join') def test_get_instances_sub_dir(self, fake_path_join): class WindowsError(Exception): def __init__(self, winerror=None): self.winerror = winerror fake_dir_name = "fake_dir_name" fake_windows_error = WindowsError self._pathutils.check_create_dir = mock.MagicMock( side_effect=WindowsError(pathutils.ERROR_INVALID_NAME)) with mock.patch.object(builtins, 'WindowsError', fake_windows_error, create=True): self.assertRaises(exception.AdminRequired, self._pathutils._get_instances_sub_dir, fake_dir_name) nova-13.1.4/nova/tests/unit/virt/hyperv/test_driver.py0000664000567000056710000004500013064447152024202 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the Hyper-V Driver. 
""" import platform import sys import mock from os_win import exceptions as os_win_exc from nova import exception from nova import safe_utils from nova.tests.unit import fake_instance from nova.tests.unit.virt.hyperv import test_base from nova.virt import driver as base_driver from nova.virt.hyperv import driver class HyperVDriverTestCase(test_base.HyperVBaseTestCase): FAKE_WIN_2008R2_VERSION = '6.0.0' @mock.patch.object(driver.HyperVDriver, '_check_minimum_windows_version') def setUp(self, mock_check_minimum_windows_version): super(HyperVDriverTestCase, self).setUp() self.context = 'context' self.driver = driver.HyperVDriver(mock.sentinel.virtapi) self.driver._hostops = mock.MagicMock() self.driver._volumeops = mock.MagicMock() self.driver._vmops = mock.MagicMock() self.driver._snapshotops = mock.MagicMock() self.driver._livemigrationops = mock.MagicMock() self.driver._migrationops = mock.MagicMock() self.driver._rdpconsoleops = mock.MagicMock() @mock.patch.object(driver.utilsfactory, 'get_hostutils') def test_check_minimum_windows_version(self, mock_get_hostutils): mock_hostutils = mock_get_hostutils.return_value mock_hostutils.check_min_windows_version.return_value = False self.assertRaises(exception.HypervisorTooOld, self.driver._check_minimum_windows_version) def test_public_api_signatures(self): # NOTE(claudiub): wrapped functions do not keep the same signature in # Python 2.7, which causes this test to fail. Instead, we should # compare the public API signatures of the unwrapped methods. for attr in driver.HyperVDriver.__dict__: class_member = getattr(driver.HyperVDriver, attr) if callable(class_member): mocked_method = mock.patch.object( driver.HyperVDriver, attr, safe_utils.get_wrapped_function(class_member)) mocked_method.start() self.addCleanup(mocked_method.stop) self.assertPublicAPISignatures(base_driver.ComputeDriver, driver.HyperVDriver) def test_converted_exception(self): self.driver._vmops.get_info.side_effect = ( os_win_exc.OSWinException) self.assertRaises(exception.NovaException, self.driver.get_info, mock.sentinel.instance) self.driver._vmops.get_info.side_effect = os_win_exc.HyperVException self.assertRaises(exception.NovaException, self.driver.get_info, mock.sentinel.instance) self.driver._vmops.get_info.side_effect = ( os_win_exc.HyperVVMNotFoundException(vm_name='foofoo')) self.assertRaises(exception.InstanceNotFound, self.driver.get_info, mock.sentinel.instance) def test_assert_original_traceback_maintained(self): def bar(self): foo = "foofoo" raise os_win_exc.HyperVVMNotFoundException(vm_name=foo) self.driver._vmops.get_info.side_effect = bar try: self.driver.get_info(mock.sentinel.instance) self.fail("Test expected exception, but it was not raised.") except exception.InstanceNotFound: # exception has been raised as expected. _, _, trace = sys.exc_info() while trace.tb_next: # iterate until the original exception source, bar. trace = trace.tb_next # original frame will contain the 'foo' variable. 
self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) @mock.patch.object(driver.eventhandler, 'InstanceEventHandler') def test_init_host(self, mock_InstanceEventHandler): self.driver.init_host(mock.sentinel.host) self.driver._vmops.restart_vm_log_writers.assert_called_once_with() mock_InstanceEventHandler.assert_called_once_with( state_change_callback=self.driver.emit_event, running_state_callback=self.driver._vmops.log_vm_serial_output) fake_event_handler = mock_InstanceEventHandler.return_value fake_event_handler.start_listener.assert_called_once_with() def test_list_instance_uuids(self): self.driver.list_instance_uuids() self.driver._vmops.list_instance_uuids.assert_called_once_with() def test_list_instances(self): self.driver.list_instances() self.driver._vmops.list_instances.assert_called_once_with() def test_spawn(self): self.driver.spawn( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.image_meta, mock.sentinel.injected_files, mock.sentinel.admin_password, mock.sentinel.network_info, mock.sentinel.block_device_info) self.driver._vmops.spawn.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.image_meta, mock.sentinel.injected_files, mock.sentinel.admin_password, mock.sentinel.network_info, mock.sentinel.block_device_info) def test_reboot(self): self.driver.reboot( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.reboot_type, mock.sentinel.block_device_info, mock.sentinel.bad_vol_callback) self.driver._vmops.reboot.assert_called_once_with( mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.reboot_type) def test_destroy(self): self.driver.destroy( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info, mock.sentinel.destroy_disks, mock.sentinel.migrate_data) self.driver._vmops.destroy.assert_called_once_with( mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info, mock.sentinel.destroy_disks) def test_get_info(self): self.driver.get_info(mock.sentinel.instance) self.driver._vmops.get_info.assert_called_once_with( mock.sentinel.instance) def test_attach_volume(self): mock_instance = fake_instance.fake_instance_obj(self.context) self.driver.attach_volume( mock.sentinel.context, mock.sentinel.connection_info, mock_instance, mock.sentinel.mountpoint, mock.sentinel.disk_bus, mock.sentinel.device_type, mock.sentinel.encryption) self.driver._volumeops.attach_volume.assert_called_once_with( mock.sentinel.connection_info, mock_instance.name) def test_detach_volume(self): mock_instance = fake_instance.fake_instance_obj(self.context) self.driver.detach_volume( mock.sentinel.connection_info, mock_instance, mock.sentinel.mountpoint, mock.sentinel.encryption) self.driver._volumeops.detach_volume.assert_called_once_with( mock.sentinel.connection_info, mock_instance.name) def test_get_volume_connector(self): self.driver.get_volume_connector(mock.sentinel.instance) self.driver._volumeops.get_volume_connector.assert_called_once_with( mock.sentinel.instance) def test_get_available_resource(self): self.driver.get_available_resource(mock.sentinel.nodename) self.driver._hostops.get_available_resource.assert_called_once_with() def test_get_available_nodes(self): response = self.driver.get_available_nodes(mock.sentinel.refresh) self.assertEqual([platform.node()], response) def test_host_power_action(self): self.driver.host_power_action(mock.sentinel.action) self.driver._hostops.host_power_action.assert_called_once_with( 
mock.sentinel.action) def test_snapshot(self): self.driver.snapshot( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.image_id, mock.sentinel.update_task_state) self.driver._snapshotops.snapshot.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.image_id, mock.sentinel.update_task_state) def test_pause(self): self.driver.pause(mock.sentinel.instance) self.driver._vmops.pause.assert_called_once_with( mock.sentinel.instance) def test_unpause(self): self.driver.unpause(mock.sentinel.instance) self.driver._vmops.unpause.assert_called_once_with( mock.sentinel.instance) def test_suspend(self): self.driver.suspend(mock.sentinel.context, mock.sentinel.instance) self.driver._vmops.suspend.assert_called_once_with( mock.sentinel.instance) def test_resume(self): self.driver.resume( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info) self.driver._vmops.resume.assert_called_once_with( mock.sentinel.instance) def test_power_off(self): self.driver.power_off( mock.sentinel.instance, mock.sentinel.timeout, mock.sentinel.retry_interval) self.driver._vmops.power_off.assert_called_once_with( mock.sentinel.instance, mock.sentinel.timeout, mock.sentinel.retry_interval) def test_power_on(self): self.driver.power_on( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info) self.driver._vmops.power_on.assert_called_once_with( mock.sentinel.instance, mock.sentinel.block_device_info) def test_resume_state_on_host_boot(self): self.driver.resume_state_on_host_boot( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info) self.driver._vmops.resume_state_on_host_boot.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info) def test_live_migration(self): self.driver.live_migration( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, mock.sentinel.post_method, mock.sentinel.recover_method, mock.sentinel.block_migration, mock.sentinel.migrate_data) self.driver._livemigrationops.live_migration.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, mock.sentinel.post_method, mock.sentinel.recover_method, mock.sentinel.block_migration, mock.sentinel.migrate_data) @mock.patch.object(driver.HyperVDriver, 'destroy') def test_rollback_live_migration_at_destination(self, mock_destroy): self.driver.rollback_live_migration_at_destination( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info, mock.sentinel.destroy_disks, mock.sentinel.migrate_data) mock_destroy.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info) def test_pre_live_migration(self): self.driver.pre_live_migration( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.block_device_info, mock.sentinel.network_info, mock.sentinel.disk_info, mock.sentinel.migrate_data) pre_live_migration = self.driver._livemigrationops.pre_live_migration pre_live_migration.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.block_device_info, mock.sentinel.network_info) def test_post_live_migration(self): self.driver.post_live_migration( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.block_device_info, mock.sentinel.migrate_data) post_live_migration = 
self.driver._livemigrationops.post_live_migration post_live_migration.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.block_device_info) def test_post_live_migration_at_destination(self): self.driver.post_live_migration_at_destination( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_migration, mock.sentinel.block_device_info) mtd = self.driver._livemigrationops.post_live_migration_at_destination mtd.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_migration) def test_check_can_live_migrate_destination(self): self.driver.check_can_live_migrate_destination( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.src_compute_info, mock.sentinel.dst_compute_info, mock.sentinel.block_migration, mock.sentinel.disk_over_commit) mtd = self.driver._livemigrationops.check_can_live_migrate_destination mtd.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.src_compute_info, mock.sentinel.dst_compute_info, mock.sentinel.block_migration, mock.sentinel.disk_over_commit) def test_check_can_live_migrate_destination_cleanup(self): self.driver.check_can_live_migrate_destination_cleanup( mock.sentinel.context, mock.sentinel.dest_check_data) _livemigrops = self.driver._livemigrationops method = _livemigrops.check_can_live_migrate_destination_cleanup method.assert_called_once_with( mock.sentinel.context, mock.sentinel.dest_check_data) def test_check_can_live_migrate_source(self): self.driver.check_can_live_migrate_source( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest_check_data, mock.sentinel.block_device_info) method = self.driver._livemigrationops.check_can_live_migrate_source method.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest_check_data) def test_plug_vifs(self): self.assertRaises(NotImplementedError, self.driver.plug_vifs, mock.sentinel.instance, mock.sentinel.network_info) def test_unplug_vifs(self): self.assertRaises(NotImplementedError, self.driver.unplug_vifs, mock.sentinel.instance, mock.sentinel.network_info) def test_refresh_instance_security_rules(self): self.assertRaises(NotImplementedError, self.driver.refresh_instance_security_rules, instance=mock.sentinel.instance) def test_migrate_disk_and_power_off(self): self.driver.migrate_disk_and_power_off( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, mock.sentinel.flavor, mock.sentinel.network_info, mock.sentinel.block_device_info, mock.sentinel.timeout, mock.sentinel.retry_interval) migr_power_off = self.driver._migrationops.migrate_disk_and_power_off migr_power_off.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, mock.sentinel.flavor, mock.sentinel.network_info, mock.sentinel.block_device_info, mock.sentinel.timeout, mock.sentinel.retry_interval) def test_confirm_migration(self): self.driver.confirm_migration( mock.sentinel.migration, mock.sentinel.instance, mock.sentinel.network_info) self.driver._migrationops.confirm_migration.assert_called_once_with( mock.sentinel.migration, mock.sentinel.instance, mock.sentinel.network_info) def test_finish_revert_migration(self): self.driver.finish_revert_migration( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info, mock.sentinel.power_on) finish_revert_migr = self.driver._migrationops.finish_revert_migration 
finish_revert_migr.assert_called_once_with( mock.sentinel.context, mock.sentinel.instance, mock.sentinel.network_info, mock.sentinel.block_device_info, mock.sentinel.power_on) def test_finish_migration(self): self.driver.finish_migration( mock.sentinel.context, mock.sentinel.migration, mock.sentinel.instance, mock.sentinel.disk_info, mock.sentinel.network_info, mock.sentinel.image_meta, mock.sentinel.resize_instance, mock.sentinel.block_device_info, mock.sentinel.power_on) self.driver._migrationops.finish_migration.assert_called_once_with( mock.sentinel.context, mock.sentinel.migration, mock.sentinel.instance, mock.sentinel.disk_info, mock.sentinel.network_info, mock.sentinel.image_meta, mock.sentinel.resize_instance, mock.sentinel.block_device_info, mock.sentinel.power_on) def test_get_host_ip_addr(self): self.driver.get_host_ip_addr() self.driver._hostops.get_host_ip_addr.assert_called_once_with() def test_get_host_uptime(self): self.driver.get_host_uptime() self.driver._hostops.get_host_uptime.assert_called_once_with() def test_get_rdp_console(self): self.driver.get_rdp_console( mock.sentinel.context, mock.sentinel.instance) self.driver._rdpconsoleops.get_rdp_console.assert_called_once_with( mock.sentinel.instance) def test_get_console_output(self): self.driver.get_console_output( mock.sentinel.context, mock.sentinel.instance) self.driver._vmops.get_console_output.assert_called_once_with( mock.sentinel.instance) nova-13.1.4/nova/tests/unit/virt/hyperv/test_snapshotops.py0000664000567000056710000001335613064447152025301 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
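# Editor's note (refers to the HyperVDriver tests above, not to the
# snapshotops tests that follow): a minimal sketch of the behaviour that
# test_converted_exception and test_assert_original_traceback_maintained
# exercise; os_win exceptions are translated into Nova exceptions while the
# original traceback (and therefore the failing frame and its locals) is
# preserved. The decorator below is a hypothetical illustration, not the
# Nova implementation.
import functools
import sys

import six


def translate_exceptions(exception_map):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as exc:
                for source, target in exception_map.items():
                    if isinstance(exc, source):
                        # Re-raise the mapped exception type with the original
                        # traceback so trace.tb_frame.f_locals stays reachable.
                        six.reraise(target, target(six.text_type(exc)),
                                    sys.exc_info()[2])
                raise
        return wrapper
    return decorator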
import os import mock from nova.compute import task_states from nova.tests.unit import fake_instance from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import snapshotops class SnapshotOpsTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V SnapshotOps class.""" def setUp(self): super(SnapshotOpsTestCase, self).setUp() self.context = 'fake_context' self._snapshotops = snapshotops.SnapshotOps() self._snapshotops._pathutils = mock.MagicMock() self._snapshotops._vmutils = mock.MagicMock() self._snapshotops._vhdutils = mock.MagicMock() @mock.patch('nova.image.glance.get_remote_image_service') def test_save_glance_image(self, mock_get_remote_image_service): image_metadata = {"is_public": False, "disk_format": "vhd", "container_format": "bare", "properties": {}} glance_image_service = mock.MagicMock() mock_get_remote_image_service.return_value = (glance_image_service, mock.sentinel.IMAGE_ID) self._snapshotops._save_glance_image(context=self.context, image_id=mock.sentinel.IMAGE_ID, image_vhd_path=mock.sentinel.PATH) mock_get_remote_image_service.assert_called_once_with( self.context, mock.sentinel.IMAGE_ID) self._snapshotops._pathutils.open.assert_called_with( mock.sentinel.PATH, 'rb') glance_image_service.update.assert_called_once_with( self.context, mock.sentinel.IMAGE_ID, image_metadata, self._snapshotops._pathutils.open().__enter__()) @mock.patch('nova.virt.hyperv.snapshotops.SnapshotOps._save_glance_image') def _test_snapshot(self, mock_save_glance_image, base_disk_path): mock_instance = fake_instance.fake_instance_obj(self.context) mock_update = mock.MagicMock() fake_src_path = os.path.join('fake', 'path') self._snapshotops._pathutils.lookup_root_vhd_path.return_value = ( fake_src_path) fake_exp_dir = os.path.join(os.path.join('fake', 'exp'), 'dir') self._snapshotops._pathutils.get_export_dir.return_value = fake_exp_dir self._snapshotops._vhdutils.get_vhd_parent_path.return_value = ( base_disk_path) fake_snapshot_path = ( self._snapshotops._vmutils.take_vm_snapshot.return_value) self._snapshotops.snapshot(context=self.context, instance=mock_instance, image_id=mock.sentinel.IMAGE_ID, update_task_state=mock_update) self._snapshotops._vmutils.take_vm_snapshot.assert_called_once_with( mock_instance.name) mock_lookup_path = self._snapshotops._pathutils.lookup_root_vhd_path mock_lookup_path.assert_called_once_with(mock_instance.name) mock_get_vhd_path = self._snapshotops._vhdutils.get_vhd_parent_path mock_get_vhd_path.assert_called_once_with(fake_src_path) self._snapshotops._pathutils.get_export_dir.assert_called_once_with( mock_instance.name) expected = [mock.call(fake_src_path, os.path.join(fake_exp_dir, os.path.basename(fake_src_path)))] dest_vhd_path = os.path.join(fake_exp_dir, os.path.basename(fake_src_path)) if base_disk_path: basename = os.path.basename(base_disk_path) base_dest_disk_path = os.path.join(fake_exp_dir, basename) expected.append(mock.call(base_disk_path, base_dest_disk_path)) mock_reconnect = self._snapshotops._vhdutils.reconnect_parent_vhd mock_reconnect.assert_called_once_with(dest_vhd_path, base_dest_disk_path) self._snapshotops._vhdutils.merge_vhd.assert_called_once_with( dest_vhd_path) mock_save_glance_image.assert_called_once_with( self.context, mock.sentinel.IMAGE_ID, base_dest_disk_path) else: mock_save_glance_image.assert_called_once_with( self.context, mock.sentinel.IMAGE_ID, dest_vhd_path) self._snapshotops._pathutils.copyfile.has_calls(expected) expected_update = [ mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD), 
mock.call(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD)] mock_update.has_calls(expected_update) self._snapshotops._vmutils.remove_vm_snapshot.assert_called_once_with( fake_snapshot_path) self._snapshotops._pathutils.rmtree.assert_called_once_with( fake_exp_dir) def test_snapshot(self): base_disk_path = os.path.join('fake', 'disk') self._test_snapshot(base_disk_path=base_disk_path) def test_snapshot_no_base_disk(self): self._test_snapshot(base_disk_path=None) nova-13.1.4/nova/tests/unit/virt/hyperv/test_hostops.py0000664000567000056710000001756113064447152024421 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from os_win import constants as os_win_const from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import units from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import constants from nova.virt.hyperv import hostops CONF = cfg.CONF class HostOpsTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V HostOps class.""" FAKE_ARCHITECTURE = 0 FAKE_NAME = 'fake_name' FAKE_MANUFACTURER = 'FAKE_MANUFACTURER' FAKE_NUM_CPUS = 1 FAKE_INSTANCE_DIR = "C:/fake/dir" FAKE_LOCAL_IP = '10.11.12.13' FAKE_TICK_COUNT = 1000000 def setUp(self): super(HostOpsTestCase, self).setUp() self._hostops = hostops.HostOps() self._hostops._hostutils = mock.MagicMock() self._hostops._pathutils = mock.MagicMock() def test_get_cpu_info(self): mock_processors = mock.MagicMock() info = {'Architecture': self.FAKE_ARCHITECTURE, 'Name': self.FAKE_NAME, 'Manufacturer': self.FAKE_MANUFACTURER, 'NumberOfCores': self.FAKE_NUM_CPUS, 'NumberOfLogicalProcessors': self.FAKE_NUM_CPUS} def getitem(key): return info[key] mock_processors.__getitem__.side_effect = getitem self._hostops._hostutils.get_cpus_info.return_value = [mock_processors] response = self._hostops._get_cpu_info() self._hostops._hostutils.get_cpus_info.assert_called_once_with() expected = [mock.call(fkey) for fkey in os_win_const.PROCESSOR_FEATURE.keys()] self._hostops._hostutils.is_cpu_feature_present.has_calls(expected) expected_response = self._get_mock_cpu_info() self.assertEqual(expected_response, response) def _get_mock_cpu_info(self): return {'vendor': self.FAKE_MANUFACTURER, 'model': self.FAKE_NAME, 'arch': constants.WMI_WIN32_PROCESSOR_ARCHITECTURE[ self.FAKE_ARCHITECTURE], 'features': list(os_win_const.PROCESSOR_FEATURE.values()), 'topology': {'cores': self.FAKE_NUM_CPUS, 'threads': self.FAKE_NUM_CPUS, 'sockets': self.FAKE_NUM_CPUS}} def test_get_memory_info(self): self._hostops._hostutils.get_memory_info.return_value = (2 * units.Ki, 1 * units.Ki) response = self._hostops._get_memory_info() self._hostops._hostutils.get_memory_info.assert_called_once_with() self.assertEqual((2, 1, 1), response) def test_get_local_hdd_info_gb(self): self._hostops._pathutils.get_instances_dir.return_value = '' 
self._hostops._hostutils.get_volume_info.return_value = (2 * units.Gi, 1 * units.Gi) response = self._hostops._get_local_hdd_info_gb() self._hostops._pathutils.get_instances_dir.assert_called_once_with() self._hostops._hostutils.get_volume_info.assert_called_once_with('') self.assertEqual((2, 1, 1), response) def test_get_hypervisor_version(self): self._hostops._hostutils.get_windows_version.return_value = '6.3.9600' response_lower = self._hostops._get_hypervisor_version() self._hostops._hostutils.get_windows_version.return_value = '10.1.0' response_higher = self._hostops._get_hypervisor_version() self.assertEqual(6003, response_lower) self.assertEqual(10001, response_higher) @mock.patch.object(hostops.HostOps, '_get_cpu_info') @mock.patch.object(hostops.HostOps, '_get_memory_info') @mock.patch.object(hostops.HostOps, '_get_hypervisor_version') @mock.patch.object(hostops.HostOps, '_get_local_hdd_info_gb') @mock.patch('platform.node') def test_get_available_resource(self, mock_node, mock_get_local_hdd_info_gb, mock_get_hypervisor_version, mock_get_memory_info, mock_get_cpu_info): mock_get_local_hdd_info_gb.return_value = (mock.sentinel.LOCAL_GB, mock.sentinel.LOCAL_GB_FREE, mock.sentinel.LOCAL_GB_USED) mock_get_memory_info.return_value = (mock.sentinel.MEMORY_MB, mock.sentinel.MEMORY_MB_FREE, mock.sentinel.MEMORY_MB_USED) mock_cpu_info = self._get_mock_cpu_info() mock_get_cpu_info.return_value = mock_cpu_info mock_get_hypervisor_version.return_value = mock.sentinel.VERSION response = self._hostops.get_available_resource() mock_get_memory_info.assert_called_once_with() mock_get_cpu_info.assert_called_once_with() mock_get_hypervisor_version.assert_called_once_with() expected = {'supported_instances': [("i686", "hyperv", "hvm"), ("x86_64", "hyperv", "hvm")], 'hypervisor_hostname': mock_node(), 'cpu_info': jsonutils.dumps(mock_cpu_info), 'hypervisor_version': mock.sentinel.VERSION, 'memory_mb': mock.sentinel.MEMORY_MB, 'memory_mb_used': mock.sentinel.MEMORY_MB_USED, 'local_gb': mock.sentinel.LOCAL_GB, 'local_gb_used': mock.sentinel.LOCAL_GB_USED, 'vcpus': self.FAKE_NUM_CPUS, 'vcpus_used': 0, 'hypervisor_type': 'hyperv', 'numa_topology': None, } self.assertEqual(expected, response) def _test_host_power_action(self, action): self._hostops._hostutils.host_power_action = mock.Mock() self._hostops.host_power_action(action) self._hostops._hostutils.host_power_action.assert_called_with( action) def test_host_power_action_shutdown(self): self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN) def test_host_power_action_reboot(self): self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT) def test_host_power_action_exception(self): self.assertRaises(NotImplementedError, self._hostops.host_power_action, constants.HOST_POWER_ACTION_STARTUP) def test_get_host_ip_addr(self): CONF.set_override('my_ip', None) self._hostops._hostutils.get_local_ips.return_value = [ self.FAKE_LOCAL_IP] response = self._hostops.get_host_ip_addr() self._hostops._hostutils.get_local_ips.assert_called_once_with() self.assertEqual(self.FAKE_LOCAL_IP, response) @mock.patch('time.strftime') def test_get_host_uptime(self, mock_time): self._hostops._hostutils.get_host_tick_count64.return_value = ( self.FAKE_TICK_COUNT) response = self._hostops.get_host_uptime() tdelta = datetime.timedelta(milliseconds=int(self.FAKE_TICK_COUNT)) expected = "%s up %s, 0 users, load average: 0, 0, 0" % ( str(mock_time()), str(tdelta)) self.assertEqual(expected, response) 
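# Editor's note (placed after the HostOps tests above): a minimal sketch of
# the version conversion that test_get_hypervisor_version expects; the Windows
# version string is reduced to major * 1000 + minor, so '6.3.9600' becomes
# 6003 and '10.1.0' becomes 10001. The helper name is hypothetical.
def hypervisor_version_as_int(windows_version):
    major, minor = windows_version.split('.')[:2]
    return int(major) * 1000 + int(minor)


assert hypervisor_version_as_int('6.3.9600') == 6003
assert hypervisor_version_as_int('10.1.0') == 10001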
nova-13.1.4/nova/tests/unit/virt/hyperv/__init__.py0000664000567000056710000000000013064447140023373 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/hyperv/test_vmops.py0000664000567000056710000016114413064447152024063 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from eventlet import timeout as etimeout import mock from os_win import constants as os_win_const from os_win import exceptions as os_win_exc from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import units import six import testtools from nova import exception from nova import objects from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_virtual_interface from nova.tests.unit.virt.hyperv import test_base from nova.virt import hardware from nova.virt.hyperv import constants from nova.virt.hyperv import vmops CONF = cfg.CONF class VMOpsTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V VMOps class.""" _FAKE_TIMEOUT = 2 FAKE_SIZE = 10 FAKE_DIR = 'fake_dir' FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s' FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso' FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd' FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3' FAKE_LOG = 'fake_log' _WIN_VERSION_6_3 = '6.3.0' _WIN_VERSION_10 = '10.0' ISO9660 = 'iso9660' _FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd' def setUp(self): super(VMOpsTestCase, self).setUp() self.context = 'fake-context' self._vmops = vmops.VMOps() self._vmops._vmutils = mock.MagicMock() self._vmops._metricsutils = mock.MagicMock() self._vmops._vhdutils = mock.MagicMock() self._vmops._pathutils = mock.MagicMock() self._vmops._hostutils = mock.MagicMock() @mock.patch('nova.network.is_neutron') @mock.patch('nova.virt.hyperv.vmops.importutils.import_object') def test_load_vif_driver_neutron(self, mock_import_object, is_neutron): is_neutron.return_value = True self._vmops._load_vif_driver_class() mock_import_object.assert_called_once_with( vmops.NEUTRON_VIF_DRIVER) @mock.patch('nova.network.is_neutron') @mock.patch('nova.virt.hyperv.vmops.importutils.import_object') def test_load_vif_driver_nova(self, mock_import_object, is_neutron): is_neutron.return_value = False self._vmops._load_vif_driver_class() mock_import_object.assert_called_once_with( vmops.NOVA_VIF_DRIVER) @mock.patch('nova.network.is_neutron') def test_load_vif_driver_unknown(self, is_neutron): # TODO(sdague): delete once network_api_class is removed from # config. 
is_neutron.return_value = None self.assertRaises(TypeError, self._vmops._load_vif_driver_class) @mock.patch('nova.virt.hyperv.vmops.importutils.import_object') def test_load_vif_driver_class_error(self, mock_import_object): mock_import_object.side_effect = KeyError self.assertRaises(TypeError, self._vmops._load_vif_driver_class) def test_list_instances(self): mock_instance = mock.MagicMock() self._vmops._vmutils.list_instances.return_value = [mock_instance] response = self._vmops.list_instances() self._vmops._vmutils.list_instances.assert_called_once_with() self.assertEqual(response, [mock_instance]) def _test_get_info(self, vm_exists): mock_instance = fake_instance.fake_instance_obj(self.context) mock_info = mock.MagicMock(spec_set=dict) fake_info = {'EnabledState': 2, 'MemoryUsage': mock.sentinel.FAKE_MEM_KB, 'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU, 'UpTime': mock.sentinel.FAKE_CPU_NS} def getitem(key): return fake_info[key] mock_info.__getitem__.side_effect = getitem expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2], max_mem_kb=mock.sentinel.FAKE_MEM_KB, mem_kb=mock.sentinel.FAKE_MEM_KB, num_cpu=mock.sentinel.FAKE_NUM_CPU, cpu_time_ns=mock.sentinel.FAKE_CPU_NS) self._vmops._vmutils.vm_exists.return_value = vm_exists self._vmops._vmutils.get_vm_summary_info.return_value = mock_info if not vm_exists: self.assertRaises(exception.InstanceNotFound, self._vmops.get_info, mock_instance) else: response = self._vmops.get_info(mock_instance) self._vmops._vmutils.vm_exists.assert_called_once_with( mock_instance.name) self._vmops._vmutils.get_vm_summary_info.assert_called_once_with( mock_instance.name) self.assertEqual(response, expected) def test_get_info(self): self._test_get_info(vm_exists=True) def test_get_info_exception(self): self._test_get_info(vm_exists=False) def _prepare_create_root_vhd_mocks(self, use_cow_images, vhd_format, vhd_size): mock_instance = fake_instance.fake_instance_obj(self.context) mock_instance.root_gb = self.FAKE_SIZE self.flags(use_cow_images=use_cow_images) self._vmops._vhdutils.get_vhd_info.return_value = {'VirtualSize': vhd_size * units.Gi} self._vmops._vhdutils.get_vhd_format.return_value = vhd_format root_vhd_internal_size = mock_instance.root_gb * units.Gi get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size get_size.return_value = root_vhd_internal_size self._vmops._pathutils.exists.return_value = True return mock_instance @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') def _test_create_root_vhd_exception(self, mock_get_cached_image, vhd_format): mock_instance = self._prepare_create_root_vhd_mocks( use_cow_images=False, vhd_format=vhd_format, vhd_size=(self.FAKE_SIZE + 1)) fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format mock_get_cached_image.return_value = fake_vhd_path fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value self.assertRaises(exception.FlavorDiskSmallerThanImage, self._vmops._create_root_vhd, self.context, mock_instance) self.assertFalse(self._vmops._vhdutils.resize_vhd.called) self._vmops._pathutils.exists.assert_called_once_with( fake_root_path) self._vmops._pathutils.remove.assert_called_once_with( fake_root_path) @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format): mock_instance = self._prepare_create_root_vhd_mocks( use_cow_images=True, vhd_format=vhd_format, vhd_size=(self.FAKE_SIZE - 1)) fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format 
mock_get_cached_image.return_value = fake_vhd_path fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value root_vhd_internal_size = mock_instance.root_gb * units.Gi get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size response = self._vmops._create_root_vhd(context=self.context, instance=mock_instance) self.assertEqual(fake_root_path, response) self._vmops._pathutils.get_root_vhd_path.assert_called_with( mock_instance.name, vhd_format) differencing_vhd = self._vmops._vhdutils.create_differencing_vhd differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path) self._vmops._vhdutils.get_vhd_info.assert_called_once_with( fake_vhd_path) if vhd_format is constants.DISK_FORMAT_VHD: self.assertFalse(get_size.called) self.assertFalse(self._vmops._vhdutils.resize_vhd.called) else: get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size) self._vmops._vhdutils.resize_vhd.assert_called_once_with( fake_root_path, root_vhd_internal_size, is_file_max_size=False) @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') def _test_create_root_vhd(self, mock_get_cached_image, vhd_format): mock_instance = self._prepare_create_root_vhd_mocks( use_cow_images=False, vhd_format=vhd_format, vhd_size=(self.FAKE_SIZE - 1)) fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format mock_get_cached_image.return_value = fake_vhd_path fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value root_vhd_internal_size = mock_instance.root_gb * units.Gi get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size response = self._vmops._create_root_vhd(context=self.context, instance=mock_instance) self.assertEqual(fake_root_path, response) self._vmops._pathutils.get_root_vhd_path.assert_called_with( mock_instance.name, vhd_format) self._vmops._pathutils.copyfile.assert_called_once_with( fake_vhd_path, fake_root_path) get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size) self._vmops._vhdutils.resize_vhd.assert_called_once_with( fake_root_path, root_vhd_internal_size, is_file_max_size=False) def test_create_root_vhd(self): self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD) def test_create_root_vhdx(self): self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX) def test_create_root_vhd_use_cow_images_true(self): self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD) def test_create_root_vhdx_use_cow_images_true(self): self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX) def test_create_root_vhdx_size_less_than_internal(self): self._test_create_root_vhd_exception( vhd_format=constants.DISK_FORMAT_VHD) def test_is_resize_needed_exception(self): inst = mock.MagicMock() self.assertRaises( exception.FlavorDiskSmallerThanImage, self._vmops._is_resize_needed, mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst) def test_is_resize_needed_true(self): inst = mock.MagicMock() self.assertTrue(self._vmops._is_resize_needed( mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst)) def test_is_resize_needed_false(self): inst = mock.MagicMock() self.assertFalse(self._vmops._is_resize_needed( mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst)) def test_create_ephemeral_vhd(self): mock_instance = fake_instance.fake_instance_obj(self.context) mock_instance.ephemeral_gb = self.FAKE_SIZE best_supported = self._vmops._vhdutils.get_best_supported_vhd_format best_supported.return_value = mock.sentinel.FAKE_FORMAT 
self._vmops._pathutils.get_ephemeral_vhd_path.return_value = ( mock.sentinel.FAKE_PATH) response = self._vmops.create_ephemeral_vhd(instance=mock_instance) self._vmops._pathutils.get_ephemeral_vhd_path.assert_called_with( mock_instance.name, mock.sentinel.FAKE_FORMAT) self._vmops._vhdutils.create_dynamic_vhd.assert_called_with( mock.sentinel.FAKE_PATH, mock_instance.ephemeral_gb * units.Gi) self.assertEqual(mock.sentinel.FAKE_PATH, response) @mock.patch('nova.virt.hyperv.vmops.VMOps.destroy') @mock.patch('nova.virt.hyperv.vmops.VMOps.power_on') @mock.patch('nova.virt.hyperv.vmops.VMOps.attach_config_drive') @mock.patch('nova.virt.hyperv.vmops.VMOps._create_config_drive') @mock.patch('nova.virt.configdrive.required_by') @mock.patch('nova.virt.hyperv.vmops.VMOps.create_instance') @mock.patch('nova.virt.hyperv.vmops.VMOps.get_image_vm_generation') @mock.patch('nova.virt.hyperv.vmops.VMOps.create_ephemeral_vhd') @mock.patch('nova.virt.hyperv.vmops.VMOps._create_root_vhd') @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.' 'ebs_root_in_block_devices') @mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files') def _test_spawn(self, mock_delete_disk_files, mock_ebs_root_in_block_devices, mock_create_root_vhd, mock_create_ephemeral_vhd, mock_get_image_vm_gen, mock_create_instance, mock_configdrive_required, mock_create_config_drive, mock_attach_config_drive, mock_power_on, mock_destroy, exists, boot_from_volume, configdrive_required, fail): mock_instance = fake_instance.fake_instance_obj(self.context) mock_image_meta = mock.MagicMock() fake_root_path = mock_create_root_vhd.return_value fake_root_path = None if boot_from_volume else fake_root_path fake_ephemeral_path = mock_create_ephemeral_vhd.return_value fake_vm_gen = mock_get_image_vm_gen.return_value fake_config_drive_path = mock_create_config_drive.return_value self._vmops._vmutils.vm_exists.return_value = exists mock_ebs_root_in_block_devices.return_value = boot_from_volume mock_create_root_vhd.return_value = fake_root_path mock_configdrive_required.return_value = configdrive_required mock_create_instance.side_effect = fail if exists: self.assertRaises(exception.InstanceExists, self._vmops.spawn, self.context, mock_instance, mock_image_meta, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.INFO, mock.sentinel.DEV_INFO) elif fail is os_win_exc.HyperVException: self.assertRaises(os_win_exc.HyperVException, self._vmops.spawn, self.context, mock_instance, mock_image_meta, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.INFO, mock.sentinel.DEV_INFO) mock_destroy.assert_called_once_with(mock_instance) else: self._vmops.spawn(self.context, mock_instance, mock_image_meta, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.INFO, mock.sentinel.DEV_INFO) self._vmops._vmutils.vm_exists.assert_called_once_with( mock_instance.name) mock_delete_disk_files.assert_called_once_with( mock_instance.name) mock_ebs_root_in_block_devices.assert_called_once_with( mock.sentinel.DEV_INFO) if not boot_from_volume: mock_create_root_vhd.assert_called_once_with(self.context, mock_instance) mock_create_ephemeral_vhd.assert_called_once_with(mock_instance) mock_get_image_vm_gen.assert_called_once_with( mock_instance.uuid, fake_root_path, mock_image_meta) mock_create_instance.assert_called_once_with( mock_instance, mock.sentinel.INFO, mock.sentinel.DEV_INFO, fake_root_path, fake_ephemeral_path, fake_vm_gen) mock_configdrive_required.assert_called_once_with(mock_instance) if configdrive_required: 
mock_create_config_drive.assert_called_once_with( mock_instance, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.INFO) mock_attach_config_drive.assert_called_once_with( mock_instance, fake_config_drive_path, fake_vm_gen) mock_power_on.assert_called_once_with(mock_instance) def test_spawn(self): self._test_spawn(exists=False, boot_from_volume=False, configdrive_required=True, fail=None) def test_spawn_instance_exists(self): self._test_spawn(exists=True, boot_from_volume=False, configdrive_required=True, fail=None) def test_spawn_create_instance_exception(self): self._test_spawn(exists=False, boot_from_volume=False, configdrive_required=True, fail=os_win_exc.HyperVException) def test_spawn_not_required(self): self._test_spawn(exists=False, boot_from_volume=False, configdrive_required=False, fail=None) def test_spawn_root_in_block(self): self._test_spawn(exists=False, boot_from_volume=True, configdrive_required=False, fail=None) def test_spawn_no_admin_permissions(self): self._vmops._vmutils.check_admin_permissions.side_effect = ( os_win_exc.HyperVException) self.assertRaises(os_win_exc.HyperVException, self._vmops.spawn, self.context, mock.DEFAULT, mock.DEFAULT, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.INFO, mock.sentinel.DEV_INFO) @mock.patch('nova.virt.hyperv.volumeops.VolumeOps' '.attach_volumes') @mock.patch.object(vmops.VMOps, '_attach_drive') def _test_create_instance(self, mock_attach_drive, mock_attach_volumes, fake_root_path, fake_ephemeral_path, enable_instance_metrics, vm_gen=constants.VM_GEN_1): mock_vif_driver = mock.MagicMock() self._vmops._vif_driver = mock_vif_driver self.flags(enable_instance_metrics_collection=enable_instance_metrics, group='hyperv') fake_network_info = {'id': mock.sentinel.ID, 'address': mock.sentinel.ADDRESS} mock_instance = fake_instance.fake_instance_obj(self.context) instance_path = os.path.join(CONF.instances_path, mock_instance.name) self._vmops.create_instance(instance=mock_instance, network_info=[fake_network_info], block_device_info=mock.sentinel.DEV_INFO, root_vhd_path=fake_root_path, eph_vhd_path=fake_ephemeral_path, vm_gen=vm_gen) self._vmops._vmutils.create_vm.assert_called_once_with( mock_instance.name, mock_instance.memory_mb, mock_instance.vcpus, CONF.hyperv.limit_cpu_features, CONF.hyperv.dynamic_memory_ratio, vm_gen, instance_path, [mock_instance.uuid]) expected = [] ctrl_type = vmops.VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] ctrl_disk_addr = 0 if fake_root_path: expected.append(mock.call(mock_instance.name, fake_root_path, 0, ctrl_disk_addr, ctrl_type, constants.DISK)) ctrl_disk_addr = 1 if fake_ephemeral_path: expected.append(mock.call(mock_instance.name, fake_ephemeral_path, 0, ctrl_disk_addr, ctrl_type, constants.DISK)) mock_attach_drive.has_calls(expected) self._vmops._vmutils.create_scsi_controller.assert_called_once_with( mock_instance.name) ebs_root = vm_gen is not constants.VM_GEN_2 and fake_root_path is None mock_attach_volumes.assert_called_once_with(mock.sentinel.DEV_INFO, mock_instance.name, ebs_root) self._vmops._vmutils.create_nic.assert_called_once_with( mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS) mock_vif_driver.plug.assert_called_once_with(mock_instance, fake_network_info) mock_enable = self._vmops._metricsutils.enable_vm_metrics_collection if enable_instance_metrics: mock_enable.assert_called_once_with(mock_instance.name) def test_create_instance(self): fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH 
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH, fake_ephemeral_path=fake_ephemeral_path, enable_instance_metrics=True) def test_create_instance_no_root_path(self): fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH self._test_create_instance(fake_root_path=None, fake_ephemeral_path=fake_ephemeral_path, enable_instance_metrics=True) def test_create_instance_no_ephemeral_path(self): self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH, fake_ephemeral_path=None, enable_instance_metrics=True) def test_create_instance_no_path(self): self._test_create_instance(fake_root_path=None, fake_ephemeral_path=None, enable_instance_metrics=False) def test_create_instance_enable_instance_metrics_false(self): fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH, fake_ephemeral_path=fake_ephemeral_path, enable_instance_metrics=False) def test_create_instance_gen2(self): self._test_create_instance(fake_root_path=None, fake_ephemeral_path=None, enable_instance_metrics=False, vm_gen=constants.VM_GEN_2) def test_attach_drive_vm_to_scsi(self): self._vmops._attach_drive( mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, constants.CTRL_TYPE_SCSI) self._vmops._vmutils.attach_scsi_drive.assert_called_once_with( mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, constants.DISK) def test_attach_drive_vm_to_ide(self): self._vmops._attach_drive( mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, constants.CTRL_TYPE_IDE) self._vmops._vmutils.attach_ide_drive.assert_called_once_with( mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, constants.DISK) def test_get_image_vm_generation_default(self): image_meta = objects.ImageMeta.from_dict({"properties": {}}) self._vmops._hostutils.get_default_vm_generation.return_value = ( constants.IMAGE_PROP_VM_GEN_1) self._vmops._hostutils.get_supported_vm_types.return_value = [ constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] response = self._vmops.get_image_vm_generation( mock.sentinel.instance_id, mock.sentinel.FAKE_PATH, image_meta) self.assertEqual(constants.VM_GEN_1, response) def test_get_image_vm_generation_gen2(self): image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_machine_type": constants.IMAGE_PROP_VM_GEN_2}}) self._vmops._hostutils.get_supported_vm_types.return_value = [ constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] self._vmops._vhdutils.get_vhd_format.return_value = ( constants.DISK_FORMAT_VHDX) response = self._vmops.get_image_vm_generation( mock.sentinel.instance_id, mock.sentinel.FAKE_PATH, image_meta) self.assertEqual(constants.VM_GEN_2, response) def test_get_image_vm_generation_not_vhdx(self): image_meta = objects.ImageMeta.from_dict( {"properties": {'hw_machine_type': constants.IMAGE_PROP_VM_GEN_2}}) self._vmops._hostutils.get_supported_vm_types.return_value = [ constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] self._vmops._vhdutils.get_vhd_format.return_value = ( constants.DISK_FORMAT_VHD) self.assertRaises(exception.InstanceUnacceptable, self._vmops.get_image_vm_generation, mock.sentinel.instance_id, mock.sentinel.FAKE_PATH, image_meta) @mock.patch('nova.api.metadata.base.InstanceMetadata') @mock.patch('nova.virt.configdrive.ConfigDriveBuilder') @mock.patch('nova.utils.execute') def 
_test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder, mock_InstanceMetadata, config_drive_format, config_drive_cdrom, side_effect): mock_instance = fake_instance.fake_instance_obj(self.context) self.flags(config_drive_format=config_drive_format) self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv') self.flags(config_drive_inject_password=True, group='hyperv') self._vmops._pathutils.get_instance_dir.return_value = ( self.FAKE_DIR) mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [ side_effect] if config_drive_format != self.ISO9660: self.assertRaises(exception.ConfigDriveUnsupportedFormat, self._vmops._create_config_drive, mock_instance, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.NET_INFO) elif side_effect is processutils.ProcessExecutionError: self.assertRaises(processutils.ProcessExecutionError, self._vmops._create_config_drive, mock_instance, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.NET_INFO) else: path = self._vmops._create_config_drive(mock_instance, [mock.sentinel.FILE], mock.sentinel.PASSWORD, mock.sentinel.NET_INFO) mock_InstanceMetadata.assert_called_once_with( mock_instance, content=[mock.sentinel.FILE], extra_md={'admin_pass': mock.sentinel.PASSWORD}, network_info=mock.sentinel.NET_INFO) self._vmops._pathutils.get_instance_dir.assert_called_once_with( mock_instance.name) mock_ConfigDriveBuilder.assert_called_with( instance_md=mock_InstanceMetadata()) mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO) path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD) mock_make_drive.assert_called_once_with(path_iso) if not CONF.hyperv.config_drive_cdrom: expected = path_vhd mock_execute.assert_called_once_with( CONF.hyperv.qemu_img_cmd, 'convert', '-f', 'raw', '-O', 'vpc', path_iso, path_vhd, attempts=1) self._vmops._pathutils.remove.assert_called_once_with( os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)) else: expected = path_iso self.assertEqual(expected, path) def test_create_config_drive_cdrom(self): self._test_create_config_drive(config_drive_format=self.ISO9660, config_drive_cdrom=True, side_effect=None) def test_create_config_drive_vhd(self): self._test_create_config_drive(config_drive_format=self.ISO9660, config_drive_cdrom=False, side_effect=None) def test_create_config_drive_other_drive_format(self): self._test_create_config_drive(config_drive_format=mock.sentinel.OTHER, config_drive_cdrom=False, side_effect=None) def test_create_config_drive_execution_error(self): self._test_create_config_drive( config_drive_format=self.ISO9660, config_drive_cdrom=False, side_effect=processutils.ProcessExecutionError) def test_attach_config_drive_exception(self): instance = fake_instance.fake_instance_obj(self.context) self.assertRaises(exception.InvalidDiskFormat, self._vmops.attach_config_drive, instance, 'C:/fake_instance_dir/configdrive.xxx', constants.VM_GEN_1) @mock.patch.object(vmops.VMOps, '_attach_drive') def test_attach_config_drive(self, mock_attach_drive): instance = fake_instance.fake_instance_obj(self.context) self._vmops.attach_config_drive(instance, self._FAKE_CONFIGDRIVE_PATH, constants.VM_GEN_1) mock_attach_drive.assert_called_once_with( instance.name, self._FAKE_CONFIGDRIVE_PATH, 1, 0, constants.CTRL_TYPE_IDE, constants.DISK) @mock.patch.object(vmops.VMOps, '_attach_drive') def test_attach_config_drive_gen2(self, mock_attach_drive): instance = fake_instance.fake_instance_obj(self.context) 
self._vmops.attach_config_drive(instance, self._FAKE_CONFIGDRIVE_PATH, constants.VM_GEN_2) mock_attach_drive.assert_called_once_with( instance.name, self._FAKE_CONFIGDRIVE_PATH, 1, 0, constants.CTRL_TYPE_SCSI, constants.DISK) def test_delete_disk_files(self): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._delete_disk_files(mock_instance.name) self._vmops._pathutils.get_instance_dir.assert_called_once_with( mock_instance.name, create_dir=False, remove_dir=True) @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes') @mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files') @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off') def test_destroy(self, mock_power_off, mock_delete_disk_files, mock_disconnect_volumes): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._vmutils.vm_exists.return_value = True self._vmops._vif_driver = mock.MagicMock() self._vmops.destroy(instance=mock_instance, network_info=[mock.sentinel.fake_vif], block_device_info=mock.sentinel.FAKE_BD_INFO) self._vmops._vmutils.vm_exists.assert_called_with( mock_instance.name) mock_power_off.assert_called_once_with(mock_instance) self._vmops._vif_driver.unplug.assert_called_once_with( mock_instance, mock.sentinel.fake_vif) self._vmops._vmutils.destroy_vm.assert_called_once_with( mock_instance.name) mock_disconnect_volumes.assert_called_once_with( mock.sentinel.FAKE_BD_INFO) mock_delete_disk_files.assert_called_once_with( mock_instance.name) def test_destroy_inexistent_instance(self): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._vmutils.vm_exists.return_value = False self._vmops.destroy(instance=mock_instance) self.assertFalse(self._vmops._vmutils.destroy_vm.called) @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off') def test_destroy_exception(self, mock_power_off): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._vmutils.destroy_vm.side_effect = ( os_win_exc.HyperVException) self._vmops._vmutils.vm_exists.return_value = True self.assertRaises(os_win_exc.HyperVException, self._vmops.destroy, mock_instance) def test_reboot_hard(self): self._test_reboot(vmops.REBOOT_TYPE_HARD, os_win_const.HYPERV_VM_STATE_REBOOT) @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") def test_reboot_soft(self, mock_soft_shutdown): mock_soft_shutdown.return_value = True self._test_reboot(vmops.REBOOT_TYPE_SOFT, os_win_const.HYPERV_VM_STATE_ENABLED) @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") def test_reboot_soft_failed(self, mock_soft_shutdown): mock_soft_shutdown.return_value = False self._test_reboot(vmops.REBOOT_TYPE_SOFT, os_win_const.HYPERV_VM_STATE_REBOOT) @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on") @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on): mock_soft_shutdown.return_value = True mock_power_on.side_effect = os_win_exc.HyperVException( "Expected failure") instance = fake_instance.fake_instance_obj(self.context) self.assertRaises(os_win_exc.HyperVException, self._vmops.reboot, instance, {}, vmops.REBOOT_TYPE_SOFT) mock_soft_shutdown.assert_called_once_with(instance) mock_power_on.assert_called_once_with(instance) def _test_reboot(self, reboot_type, vm_state): instance = fake_instance.fake_instance_obj(self.context) with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state: self._vmops.reboot(instance, {}, reboot_type) mock_set_state.assert_called_once_with(instance, vm_state) 
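    # NOTE: the soft shutdown tests below only encode what the mocked utils
    # are expected to see: soft_shutdown_vm() is retried, _wait_for_power_off()
    # is first given the retry interval and then the remaining part of the
    # timeout, and an exception or a False result means the soft shutdown is
    # reported as failed. The actual loop lives in VMOps._soft_shutdown and is
    # not reproduced here.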
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off") def test_soft_shutdown(self, mock_wait_for_power_off): instance = fake_instance.fake_instance_obj(self.context) mock_wait_for_power_off.return_value = True result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT) mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm mock_shutdown_vm.assert_called_once_with(instance.name) mock_wait_for_power_off.assert_called_once_with( instance.name, self._FAKE_TIMEOUT) self.assertTrue(result) @mock.patch("time.sleep") def test_soft_shutdown_failed(self, mock_sleep): instance = fake_instance.fake_instance_obj(self.context) mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm mock_shutdown_vm.side_effect = os_win_exc.HyperVException( "Expected failure.") result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT) mock_shutdown_vm.assert_called_once_with(instance.name) self.assertFalse(result) @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off") def test_soft_shutdown_wait(self, mock_wait_for_power_off): instance = fake_instance.fake_instance_obj(self.context) mock_wait_for_power_off.side_effect = [False, True] result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1) calls = [mock.call(instance.name, 1), mock.call(instance.name, self._FAKE_TIMEOUT - 1)] mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm mock_shutdown_vm.assert_called_with(instance.name) mock_wait_for_power_off.assert_has_calls(calls) self.assertTrue(result) @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off") def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off): instance = fake_instance.fake_instance_obj(self.context) mock_wait_for_power_off.return_value = False result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5) calls = [mock.call(instance.name, 1.5), mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)] mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm mock_shutdown_vm.assert_called_with(instance.name) mock_wait_for_power_off.assert_has_calls(calls) self.assertFalse(result) @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') def test_pause(self, mock_set_vm_state): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops.pause(instance=mock_instance) mock_set_vm_state.assert_called_once_with( mock_instance, os_win_const.HYPERV_VM_STATE_PAUSED) @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') def test_unpause(self, mock_set_vm_state): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops.unpause(instance=mock_instance) mock_set_vm_state.assert_called_once_with( mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') def test_suspend(self, mock_set_vm_state): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops.suspend(instance=mock_instance) mock_set_vm_state.assert_called_once_with( mock_instance, os_win_const.HYPERV_VM_STATE_SUSPENDED) @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') def test_resume(self, mock_set_vm_state): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops.resume(instance=mock_instance) mock_set_vm_state.assert_called_once_with( mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) def _test_power_off(self, timeout, set_state_expected=True): instance = fake_instance.fake_instance_obj(self.context) with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state: self._vmops.power_off(instance, timeout) if set_state_expected: 
mock_set_state.assert_called_once_with( instance, os_win_const.HYPERV_VM_STATE_DISABLED) def test_power_off_hard(self): self._test_power_off(timeout=0) @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") def test_power_off_exception(self, mock_soft_shutdown): mock_soft_shutdown.return_value = False self._test_power_off(timeout=1) @mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state") @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") def test_power_off_soft(self, mock_soft_shutdown, mock_set_state): instance = fake_instance.fake_instance_obj(self.context) mock_soft_shutdown.return_value = True self._vmops.power_off(instance, 1, 0) mock_soft_shutdown.assert_called_once_with( instance, 1, vmops.SHUTDOWN_TIME_INCREMENT) self.assertFalse(mock_set_state.called) @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") def test_power_off_unexisting_instance(self, mock_soft_shutdown): mock_soft_shutdown.side_effect = os_win_exc.HyperVVMNotFoundException( vm_name=mock.sentinel.vm_name) self._test_power_off(timeout=1, set_state_expected=False) @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') def test_power_on(self, mock_set_vm_state): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops.power_on(mock_instance) mock_set_vm_state.assert_called_once_with( mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) @mock.patch('nova.virt.hyperv.volumeops.VolumeOps' '.fix_instance_volume_disk_paths') @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') def test_power_on_having_block_devices(self, mock_set_vm_state, mock_fix_instance_vol_paths): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops.power_on(mock_instance, mock.sentinel.block_device_info) mock_fix_instance_vol_paths.assert_called_once_with( mock_instance.name, mock.sentinel.block_device_info) mock_set_vm_state.assert_called_once_with( mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) @mock.patch.object(vmops.VMOps, 'log_vm_serial_output') @mock.patch.object(vmops.VMOps, '_delete_vm_console_log') def _test_set_vm_state(self, mock_delete_vm_console_log, mock_log_vm_output, state): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._set_vm_state(mock_instance, state) self._vmops._vmutils.set_vm_state.assert_called_once_with( mock_instance.name, state) if state in (os_win_const.HYPERV_VM_STATE_DISABLED, os_win_const.HYPERV_VM_STATE_REBOOT): mock_delete_vm_console_log.assert_called_once_with(mock_instance) if state in (os_win_const.HYPERV_VM_STATE_ENABLED, os_win_const.HYPERV_VM_STATE_REBOOT): mock_log_vm_output.assert_called_once_with(mock_instance.name, mock_instance.uuid) def test_set_vm_state_disabled(self): self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_DISABLED) def test_set_vm_state_enabled(self): self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_ENABLED) def test_set_vm_state_reboot(self): self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_REBOOT) def test_set_vm_state_exception(self): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._vmutils.set_vm_state.side_effect = ( os_win_exc.HyperVException) self.assertRaises(os_win_exc.HyperVException, self._vmops._set_vm_state, mock_instance, mock.sentinel.STATE) def test_get_vm_state(self): summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED} with mock.patch.object(self._vmops._vmutils, 'get_vm_summary_info') as mock_get_summary_info: mock_get_summary_info.return_value = summary_info response = 
self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME) self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED) @mock.patch.object(vmops.VMOps, '_get_vm_state') def test_wait_for_power_off_true(self, mock_get_state): mock_get_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED result = self._vmops._wait_for_power_off( mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME) self.assertTrue(result) @mock.patch.object(vmops.etimeout, "with_timeout") def test_wait_for_power_off_false(self, mock_with_timeout): mock_with_timeout.side_effect = etimeout.Timeout() result = self._vmops._wait_for_power_off( mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) self.assertFalse(result) @mock.patch.object(vmops.ioutils, 'IOThread') def _test_log_vm_serial_output(self, mock_io_thread, worker_running=False, worker_exists=False): self._vmops._pathutils.get_vm_console_log_paths.return_value = ( mock.sentinel.log_path, ) fake_instance_uuid = 'fake-uuid' fake_existing_worker = mock.Mock() fake_existing_worker.is_active.return_value = worker_running fake_log_writers = {fake_instance_uuid: fake_existing_worker} self._vmops._vm_log_writers = ( fake_log_writers if worker_exists else {}) self._vmops.log_vm_serial_output(mock.sentinel.instance_name, fake_instance_uuid) if not (worker_exists and worker_running): expected_pipe_path = r'\\.\pipe\%s' % fake_instance_uuid expected_current_worker = mock_io_thread.return_value expected_current_worker.start.assert_called_once_with() mock_io_thread.assert_called_once_with( expected_pipe_path, mock.sentinel.log_path, self._vmops._MAX_CONSOLE_LOG_FILE_SIZE) else: expected_current_worker = fake_existing_worker self.assertEqual(expected_current_worker, self._vmops._vm_log_writers[fake_instance_uuid]) def test_log_vm_serial_output_unexisting_worker(self): self._test_log_vm_serial_output() def test_log_vm_serial_output_worker_stopped(self): self._test_log_vm_serial_output(worker_exists=True) def test_log_vm_serial_output_worker_running(self): self._test_log_vm_serial_output(worker_exists=True, worker_running=True) def test_copy_vm_console_logs(self): fake_local_paths = (mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_PATH_ARCHIVED) fake_remote_paths = (mock.sentinel.FAKE_REMOTE_PATH, mock.sentinel.FAKE_REMOTE_PATH_ARCHIVED) self._vmops._pathutils.get_vm_console_log_paths.side_effect = [ fake_local_paths, fake_remote_paths] self._vmops._pathutils.exists.side_effect = [True, False] self._vmops.copy_vm_console_logs(mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_DEST) calls = [mock.call(mock.sentinel.FAKE_VM_NAME), mock.call(mock.sentinel.FAKE_VM_NAME, remote_server=mock.sentinel.FAKE_DEST)] self._vmops._pathutils.get_vm_console_log_paths.assert_has_calls(calls) calls = [mock.call(mock.sentinel.FAKE_PATH), mock.call(mock.sentinel.FAKE_PATH_ARCHIVED)] self._vmops._pathutils.exists.assert_has_calls(calls) self._vmops._pathutils.copy.assert_called_once_with( mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_REMOTE_PATH) @mock.patch.object(vmops.ioutils, 'IOThread') def test_log_vm_serial_output(self, fake_iothread): self._vmops._pathutils.get_vm_console_log_paths.return_value = [ mock.sentinel.FAKE_PATH] self._vmops.log_vm_serial_output(mock.sentinel.FAKE_VM_NAME, self.FAKE_UUID) pipe_path = r'\\.\pipe\%s' % self.FAKE_UUID fake_iothread.assert_called_once_with( pipe_path, mock.sentinel.FAKE_PATH, self._vmops._MAX_CONSOLE_LOG_FILE_SIZE) fake_iothread.return_value.start.assert_called_once_with() 
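    # NOTE: per the assertions below, get_console_output() is expected to read
    # the archived console log first and then the current one, returning their
    # concatenation (hence FAKE_LOG * 2); the file access itself is mocked, so
    # only the path lookup and read ordering are checked here.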
@testtools.skip('mock_open in 1.2 read only works once 1475661') @mock.patch("os.path.exists") def test_get_console_output(self, fake_path_exists): mock_instance = fake_instance.fake_instance_obj(self.context) fake_path_exists.return_value = True self._vmops._pathutils.get_vm_console_log_paths.return_value = ( mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_PATH_ARCHIVED) with mock.patch('nova.virt.hyperv.vmops.open', mock.mock_open(read_data=self.FAKE_LOG), create=True): instance_log = self._vmops.get_console_output(mock_instance) # get_vm_console_log_paths returns 2 paths. self.assertEqual(self.FAKE_LOG * 2, instance_log) expected_calls = [mock.call(mock.sentinel.FAKE_PATH_ARCHIVED), mock.call(mock.sentinel.FAKE_PATH)] fake_path_exists.assert_has_calls(expected_calls, any_order=False) @mock.patch.object(six.moves.builtins, 'open') @mock.patch("os.path.exists") def test_get_console_output_exception(self, fake_path_exists, fake_open): fake_vm = mock.MagicMock() fake_open.side_effect = IOError fake_path_exists.return_value = True self._vmops._pathutils.get_vm_console_log_paths.return_value = ( mock.sentinel.fake_console_log_path, mock.sentinel.fake_console_log_archived) with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True): self.assertRaises(exception.ConsoleLogOutputException, self._vmops.get_console_output, fake_vm) @mock.patch.object(vmops.fileutils, 'delete_if_exists') def test_delete_vm_console_log(self, mock_delete_if_exists): mock_instance = fake_instance.fake_instance_obj(self.context) self._vmops._pathutils.get_vm_console_log_paths.return_value = ( mock.sentinel.FAKE_PATH, ) mock_log_writer = mock.MagicMock() self._vmops._vm_log_writers[mock_instance['uuid']] = mock_log_writer self._vmops._delete_vm_console_log(mock_instance) mock_log_writer.join.assert_called_once_with() mock_delete_if_exists.assert_called_once_with(mock.sentinel.FAKE_PATH) def test_create_vm_com_port_pipe(self): mock_instance = fake_instance.fake_instance_obj(self.context) pipe_path = r'\\.\pipe\%s' % mock_instance['uuid'] self._vmops._create_vm_com_port_pipe(mock_instance) get_vm_serial_port = self._vmops._vmutils.get_vm_serial_port_connection get_vm_serial_port.assert_called_once_with(mock_instance['name'], update_connection=pipe_path) @mock.patch.object(vmops.VMOps, "log_vm_serial_output") @mock.patch("os.path.basename") @mock.patch("os.path.exists") def test_restart_vm_log_writers(self, mock_exists, mock_basename, mock_log_vm_output): self._vmops._vmutils.get_active_instances.return_value = [ mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_VM_NAME_OTHER] mock_exists.side_effect = [True, False] self._vmops.restart_vm_log_writers() calls = [mock.call(mock.sentinel.FAKE_VM_NAME), mock.call(mock.sentinel.FAKE_VM_NAME_OTHER)] self._vmops._pathutils.get_instance_dir.assert_has_calls(calls) get_vm_serial_port = self._vmops._vmutils.get_vm_serial_port_connection get_vm_serial_port.assert_called_once_with(mock.sentinel.FAKE_VM_NAME) mock_log_vm_output.assert_called_once_with(mock.sentinel.FAKE_VM_NAME, mock_basename.return_value) def test_list_instance_uuids(self): fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3' with mock.patch.object(self._vmops._vmutils, 'list_instance_notes') as mock_list_notes: mock_list_notes.return_value = [('fake_name', [fake_uuid])] response = self._vmops.list_instance_uuids() mock_list_notes.assert_called_once_with() self.assertEqual(response, [fake_uuid]) def test_copy_vm_dvd_disks(self): fake_paths = [mock.sentinel.FAKE_DVD_PATH1, mock.sentinel.FAKE_DVD_PATH2] mock_copy = 
self._vmops._pathutils.copyfile mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths mock_get_dvd_disk_paths.return_value = fake_paths self._vmops._pathutils.get_instance_dir.return_value = ( mock.sentinel.FAKE_DEST_PATH) self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_DEST_HOST) mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME) self._vmops._pathutils.get_instance_dir.assert_called_once_with( mock.sentinel.FAKE_VM_NAME, remote_server=mock.sentinel.FAKE_DEST_HOST) mock_copy.assert_has_calls([mock.call(mock.sentinel.FAKE_DVD_PATH1, mock.sentinel.FAKE_DEST_PATH), mock.call(mock.sentinel.FAKE_DVD_PATH2, mock.sentinel.FAKE_DEST_PATH)]) @mock.patch.object(vmops.VMOps, '_get_vm_state') def test_check_hotplug_available_vm_disabled(self, mock_get_vm_state): fake_vm = fake_instance.fake_instance_obj(self.context) mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED result = self._vmops._check_hotplug_available(fake_vm) self.assertTrue(result) mock_get_vm_state.assert_called_once_with(fake_vm.name) self.assertFalse( self._vmops._hostutils.check_min_windows_version.called) self.assertFalse(self._vmops._vmutils.get_vm_generation.called) @mock.patch.object(vmops.VMOps, '_get_vm_state') def _test_check_hotplug_available( self, mock_get_vm_state, expected_result=False, vm_gen=constants.VM_GEN_2, windows_version=_WIN_VERSION_10): fake_vm = fake_instance.fake_instance_obj(self.context) mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_ENABLED self._vmops._vmutils.get_vm_generation.return_value = vm_gen fake_check_win_vers = self._vmops._hostutils.check_min_windows_version fake_check_win_vers.return_value = ( windows_version == self._WIN_VERSION_10) result = self._vmops._check_hotplug_available(fake_vm) self.assertEqual(expected_result, result) mock_get_vm_state.assert_called_once_with(fake_vm.name) fake_check_win_vers.assert_called_once_with(10, 0) def test_check_if_hotplug_available(self): self._test_check_hotplug_available(expected_result=True) def test_check_if_hotplug_available_gen1(self): self._test_check_hotplug_available( expected_result=False, vm_gen=constants.VM_GEN_1) def test_check_if_hotplug_available_win_6_3(self): self._test_check_hotplug_available( expected_result=False, windows_version=self._WIN_VERSION_6_3) @mock.patch.object(vmops.VMOps, '_check_hotplug_available') def test_attach_interface(self, mock_check_hotplug_available): mock_check_hotplug_available.return_value = True fake_vm = fake_instance.fake_instance_obj(self.context) fake_vif = test_virtual_interface.fake_vif self._vmops._vif_driver = mock.MagicMock() self._vmops.attach_interface(fake_vm, fake_vif) mock_check_hotplug_available.assert_called_once_with(fake_vm) self._vmops._vif_driver.plug.assert_called_once_with( fake_vm, fake_vif) self._vmops._vmutils.create_nic.assert_called_once_with( fake_vm.name, fake_vif['id'], fake_vif['address']) @mock.patch.object(vmops.VMOps, '_check_hotplug_available') def test_attach_interface_failed(self, mock_check_hotplug_available): mock_check_hotplug_available.return_value = False self.assertRaises(exception.InterfaceAttachFailed, self._vmops.attach_interface, mock.MagicMock(), mock.sentinel.fake_vif) @mock.patch.object(vmops.VMOps, '_check_hotplug_available') def test_detach_interface(self, mock_check_hotplug_available): mock_check_hotplug_available.return_value = True fake_vm = fake_instance.fake_instance_obj(self.context) fake_vif = test_virtual_interface.fake_vif self._vmops._vif_driver = mock.MagicMock() 
self._vmops.detach_interface(fake_vm, fake_vif) mock_check_hotplug_available.assert_called_once_with(fake_vm) self._vmops._vif_driver.unplug.assert_called_once_with( fake_vm, fake_vif) self._vmops._vmutils.destroy_nic.assert_called_once_with( fake_vm.name, fake_vif['id']) @mock.patch.object(vmops.VMOps, '_check_hotplug_available') def test_detach_interface_failed(self, mock_check_hotplug_available): mock_check_hotplug_available.return_value = False self.assertRaises(exception.InterfaceDetachFailed, self._vmops.detach_interface, mock.MagicMock(), mock.sentinel.fake_vif) @mock.patch.object(vmops.VMOps, '_check_hotplug_available') def test_detach_interface_missing_instance(self, mock_check_hotplug): mock_check_hotplug.side_effect = os_win_exc.HyperVVMNotFoundException( vm_name='fake_vm') self.assertRaises(exception.InterfaceDetachFailed, self._vmops.detach_interface, mock.MagicMock(), mock.sentinel.fake_vif) nova-13.1.4/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py0000664000567000056710000000335613064447140025606 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the Hyper-V RDPConsoleOps. """ import mock from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import rdpconsoleops class RDPConsoleOpsTestCase(test_base.HyperVBaseTestCase): def setUp(self): super(RDPConsoleOpsTestCase, self).setUp() self.rdpconsoleops = rdpconsoleops.RDPConsoleOps() self.rdpconsoleops._hostops = mock.MagicMock() self.rdpconsoleops._vmutils = mock.MagicMock() self.rdpconsoleops._rdpconsoleutils = mock.MagicMock() def test_get_rdp_console(self): mock_get_host_ip = self.rdpconsoleops._hostops.get_host_ip_addr mock_get_rdp_port = ( self.rdpconsoleops._rdpconsoleutils.get_rdp_console_port) mock_get_vm_id = self.rdpconsoleops._vmutils.get_vm_id connect_info = self.rdpconsoleops.get_rdp_console(mock.DEFAULT) self.assertEqual(mock_get_host_ip.return_value, connect_info.host) self.assertEqual(mock_get_rdp_port.return_value, connect_info.port) self.assertEqual(mock_get_vm_id.return_value, connect_info.internal_access_path) nova-13.1.4/nova/tests/unit/virt/hyperv/test_migrationops.py0000664000567000056710000005254013064447152025431 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
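# The tests in this module cover MigrationOps: moving and reverting instance
# disk files between hosts, merging differencing VHDs into a copied base
# image, growing root/ephemeral VHDs on resize, and re-creating the instance
# in finish_migration()/finish_revert_migration().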
import os import mock from os_win import exceptions as os_win_exc from oslo_utils import units from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import migrationops class MigrationOpsTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V MigrationOps class.""" _FAKE_DISK = 'fake_disk' _FAKE_TIMEOUT = 10 _FAKE_RETRY_INTERVAL = 5 def setUp(self): super(MigrationOpsTestCase, self).setUp() self.context = 'fake-context' self._migrationops = migrationops.MigrationOps() self._migrationops._hostutils = mock.MagicMock() self._migrationops._vmops = mock.MagicMock() self._migrationops._vmutils = mock.MagicMock() self._migrationops._pathutils = mock.Mock() self._migrationops._vhdutils = mock.MagicMock() self._migrationops._pathutils = mock.MagicMock() self._migrationops._volumeops = mock.MagicMock() self._migrationops._imagecache = mock.MagicMock() def _check_migrate_disk_files(self, host): instance_path = 'fake/instance/path' self._migrationops._pathutils.get_instance_dir.return_value = ( instance_path) get_revert_dir = ( self._migrationops._pathutils.get_instance_migr_revert_dir) self._migrationops._hostutils.get_local_ips.return_value = [host] self._migrationops._pathutils.exists.return_value = True expected_get_dir = [mock.call(mock.sentinel.instance_name)] expected_move_calls = [mock.call(instance_path, get_revert_dir.return_value)] self._migrationops._migrate_disk_files( instance_name=mock.sentinel.instance_name, disk_files=[self._FAKE_DISK], dest=mock.sentinel.dest_path) self._migrationops._hostutils.get_local_ips.assert_called_once_with() get_revert_dir.assert_called_with(mock.sentinel.instance_name, remove_dir=True, create_dir=True) if host == mock.sentinel.dest_path: fake_dest_path = '%s_tmp' % instance_path self._migrationops._pathutils.exists.assert_called_once_with( fake_dest_path) self._migrationops._pathutils.rmtree.assert_called_once_with( fake_dest_path) self._migrationops._pathutils.makedirs.assert_called_once_with( fake_dest_path) expected_move_calls.append(mock.call(fake_dest_path, instance_path)) else: fake_dest_path = instance_path expected_get_dir.append(mock.call(mock.sentinel.instance_name, mock.sentinel.dest_path, remove_dir=True)) self._migrationops._pathutils.get_instance_dir.assert_has_calls( expected_get_dir) self._migrationops._pathutils.copy.assert_called_once_with( self._FAKE_DISK, fake_dest_path) self._migrationops._pathutils.move_folder_files.assert_has_calls( expected_move_calls) def test_migrate_disk_files(self): self._check_migrate_disk_files(host=mock.sentinel.other_dest_path) def test_migrate_disk_files_same_host(self): self._check_migrate_disk_files(host=mock.sentinel.dest_path) @mock.patch.object(migrationops.MigrationOps, '_cleanup_failed_disk_migration') def test_migrate_disk_files_exception(self, mock_cleanup): instance_path = 'fake/instance/path' fake_dest_path = '%s_tmp' % instance_path self._migrationops._pathutils.get_instance_dir.return_value = ( instance_path) get_revert_dir = ( self._migrationops._pathutils.get_instance_migr_revert_dir) self._migrationops._hostutils.get_local_ips.return_value = [ mock.sentinel.dest_path] self._migrationops._pathutils.copy.side_effect = IOError( "Expected exception.") self.assertRaises(IOError, self._migrationops._migrate_disk_files, instance_name=mock.sentinel.instance_name, disk_files=[self._FAKE_DISK], dest=mock.sentinel.dest_path) 
mock_cleanup.assert_called_once_with(instance_path, get_revert_dir.return_value, fake_dest_path) def test_cleanup_failed_disk_migration(self): self._migrationops._pathutils.exists.return_value = True self._migrationops._cleanup_failed_disk_migration( instance_path=mock.sentinel.instance_path, revert_path=mock.sentinel.revert_path, dest_path=mock.sentinel.dest_path) expected = [mock.call(mock.sentinel.dest_path), mock.call(mock.sentinel.revert_path)] self._migrationops._pathutils.exists.assert_has_calls(expected) move_folder_files = self._migrationops._pathutils.move_folder_files move_folder_files.assert_called_once_with( mock.sentinel.revert_path, mock.sentinel.instance_path) self._migrationops._pathutils.rmtree.assert_has_calls([ mock.call(mock.sentinel.dest_path), mock.call(mock.sentinel.revert_path)]) def test_check_target_flavor(self): mock_instance = fake_instance.fake_instance_obj(self.context) mock_instance.root_gb = 1 mock_flavor = mock.MagicMock(root_gb=0) self.assertRaises(exception.InstanceFaultRollback, self._migrationops._check_target_flavor, mock_instance, mock_flavor) def test_check_and_attach_config_drive(self): mock_instance = fake_instance.fake_instance_obj( self.context, expected_attrs=['system_metadata']) mock_instance.config_drive = 'True' self._migrationops._check_and_attach_config_drive( mock_instance, mock.sentinel.vm_gen) self._migrationops._vmops.attach_config_drive.assert_called_once_with( mock_instance, self._migrationops._pathutils.lookup_configdrive_path.return_value, mock.sentinel.vm_gen) def test_check_and_attach_config_drive_unknown_path(self): instance = fake_instance.fake_instance_obj( self.context, expected_attrs=['system_metadata']) instance.config_drive = 'True' self._migrationops._pathutils.lookup_configdrive_path.return_value = ( None) self.assertRaises(exception.ConfigDriveNotFound, self._migrationops._check_and_attach_config_drive, instance, mock.sentinel.FAKE_VM_GEN) @mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files') @mock.patch.object(migrationops.MigrationOps, '_check_target_flavor') def test_migrate_disk_and_power_off(self, mock_check_flavor, mock_migrate_disk_files): instance = fake_instance.fake_instance_obj(self.context) flavor = mock.MagicMock() network_info = mock.MagicMock() disk_files = [mock.MagicMock()] volume_drives = [mock.MagicMock()] mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths mock_get_vm_st_path.return_value = (disk_files, volume_drives) self._migrationops.migrate_disk_and_power_off( self.context, instance, mock.sentinel.FAKE_DEST, flavor, network_info, None, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL) mock_check_flavor.assert_called_once_with(instance, flavor) self._migrationops._vmops.power_off.assert_called_once_with( instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL) mock_get_vm_st_path.assert_called_once_with(instance.name) mock_migrate_disk_files.assert_called_once_with( instance.name, disk_files, mock.sentinel.FAKE_DEST) self._migrationops._vmops.destroy.assert_called_once_with( instance, destroy_disks=False) def test_confirm_migration(self): mock_instance = fake_instance.fake_instance_obj(self.context) self._migrationops.confirm_migration( migration=mock.sentinel.migration, instance=mock_instance, network_info=mock.sentinel.network_info) get_instance_migr_revert_dir = ( self._migrationops._pathutils.get_instance_migr_revert_dir) get_instance_migr_revert_dir.assert_called_with(mock_instance.name, remove_dir=True) def test_revert_migration_files(self): instance_path = ( 
self._migrationops._pathutils.get_instance_dir.return_value) get_revert_dir = ( self._migrationops._pathutils.get_instance_migr_revert_dir) self._migrationops._revert_migration_files( instance_name=mock.sentinel.instance_name) self._migrationops._pathutils.get_instance_dir.assert_called_once_with( mock.sentinel.instance_name, create_dir=False, remove_dir=True) get_revert_dir.assert_called_with(mock.sentinel.instance_name) self._migrationops._pathutils.rename.assert_called_once_with( get_revert_dir.return_value, instance_path) @mock.patch.object(migrationops.MigrationOps, '_check_and_attach_config_drive') @mock.patch.object(migrationops.MigrationOps, '_revert_migration_files') @mock.patch.object(objects.ImageMeta, "from_instance") def _check_finish_revert_migration(self, mock_image, mock_revert_migration_files, mock_check_attach_config_drive, boot_from_volume=False): mock_image.return_value = objects.ImageMeta.from_dict({}) mock_instance = fake_instance.fake_instance_obj(self.context) mock_ebs_root_in_block_devices = ( self._migrationops._volumeops.ebs_root_in_block_devices) mock_ebs_root_in_block_devices.return_value = boot_from_volume lookup_ephemeral = ( self._migrationops._pathutils.lookup_ephemeral_vhd_path) self._migrationops.finish_revert_migration( context=self.context, instance=mock_instance, network_info=mock.sentinel.network_info, block_device_info=mock.sentinel.block_device, power_on=True) mock_revert_migration_files.assert_called_once_with( mock_instance.name) mock_ebs_root_in_block_devices.assert_called_once_with( mock.sentinel.block_device) if not boot_from_volume: lookup_root_vhd = ( self._migrationops._pathutils.lookup_root_vhd_path) lookup_root_vhd.assert_called_once_with(mock_instance.name) fake_root_path = lookup_root_vhd.return_value else: fake_root_path = None lookup_ephemeral.assert_called_with(mock_instance.name) get_image_vm_gen = self._migrationops._vmops.get_image_vm_generation get_image_vm_gen.assert_called_once_with( mock_instance.uuid, fake_root_path, test.MatchType(objects.ImageMeta)) self._migrationops._vmops.create_instance.assert_called_once_with( mock_instance, mock.sentinel.network_info, mock.sentinel.block_device, fake_root_path, lookup_ephemeral.return_value, get_image_vm_gen.return_value) mock_check_attach_config_drive.assert_called_once_with( mock_instance, get_image_vm_gen.return_value) self._migrationops._vmops.power_on.assert_called_once_with( mock_instance) def test_finish_revert_migration_boot_from_volume(self): self._check_finish_revert_migration(boot_from_volume=True) def test_finish_revert_migration_not_in_block_device(self): self._check_finish_revert_migration() def test_merge_base_vhd(self): fake_diff_vhd_path = 'fake/diff/path' fake_base_vhd_path = 'fake/base/path' base_vhd_copy_path = os.path.join( os.path.dirname(fake_diff_vhd_path), os.path.basename(fake_base_vhd_path)) self._migrationops._merge_base_vhd(diff_vhd_path=fake_diff_vhd_path, base_vhd_path=fake_base_vhd_path) self._migrationops._pathutils.copyfile.assert_called_once_with( fake_base_vhd_path, base_vhd_copy_path) recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd recon_parent_vhd.assert_called_once_with(fake_diff_vhd_path, base_vhd_copy_path) self._migrationops._vhdutils.merge_vhd.assert_called_once_with( fake_diff_vhd_path) self._migrationops._pathutils.rename.assert_called_once_with( base_vhd_copy_path, fake_diff_vhd_path) def test_merge_base_vhd_exception(self): fake_diff_vhd_path = 'fake/diff/path' fake_base_vhd_path = 'fake/base/path' base_vhd_copy_path = 
os.path.join( os.path.dirname(fake_diff_vhd_path), os.path.basename(fake_base_vhd_path)) self._migrationops._vhdutils.reconnect_parent_vhd.side_effect = ( os_win_exc.HyperVException) self._migrationops._pathutils.exists.return_value = True self.assertRaises(os_win_exc.HyperVException, self._migrationops._merge_base_vhd, fake_diff_vhd_path, fake_base_vhd_path) self._migrationops._pathutils.exists.assert_called_once_with( base_vhd_copy_path) self._migrationops._pathutils.remove.assert_called_once_with( base_vhd_copy_path) @mock.patch.object(migrationops.MigrationOps, '_resize_vhd') def test_check_resize_vhd(self, mock_resize_vhd): self._migrationops._check_resize_vhd( vhd_path=mock.sentinel.vhd_path, vhd_info={'VirtualSize': 1}, new_size=2) mock_resize_vhd.assert_called_once_with(mock.sentinel.vhd_path, 2) def test_check_resize_vhd_exception(self): self.assertRaises(exception.CannotResizeDisk, self._migrationops._check_resize_vhd, mock.sentinel.vhd_path, {'VirtualSize': 1}, 0) @mock.patch.object(migrationops.MigrationOps, '_merge_base_vhd') def test_resize_vhd(self, mock_merge_base_vhd): fake_vhd_path = 'fake/path.vhd' new_vhd_size = 2 self._migrationops._resize_vhd(vhd_path=fake_vhd_path, new_size=new_vhd_size) get_vhd_parent_path = self._migrationops._vhdutils.get_vhd_parent_path get_vhd_parent_path.assert_called_once_with(fake_vhd_path) mock_merge_base_vhd.assert_called_once_with( fake_vhd_path, self._migrationops._vhdutils.get_vhd_parent_path.return_value) self._migrationops._vhdutils.resize_vhd.assert_called_once_with( fake_vhd_path, new_vhd_size) def test_check_base_disk(self): mock_instance = fake_instance.fake_instance_obj(self.context) fake_src_vhd_path = 'fake/src/path' fake_base_vhd = 'fake/vhd' get_cached_image = self._migrationops._imagecache.get_cached_image get_cached_image.return_value = fake_base_vhd self._migrationops._check_base_disk( context=self.context, instance=mock_instance, diff_vhd_path=mock.sentinel.diff_vhd_path, src_base_disk_path=fake_src_vhd_path) get_cached_image.assert_called_once_with(self.context, mock_instance) recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd recon_parent_vhd.assert_called_once_with( mock.sentinel.diff_vhd_path, fake_base_vhd) @mock.patch.object(migrationops.MigrationOps, '_check_and_attach_config_drive') @mock.patch.object(migrationops.MigrationOps, '_check_base_disk') @mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd') def _check_finish_migration(self, mock_check_resize_vhd, mock_check_base_disk, mock_check_attach_config_drive, ephemeral_path=None, boot_from_volume=False): mock_instance = fake_instance.fake_instance_obj(self.context) mock_instance.ephemeral_gb = 1 mock_vhd_info = mock.MagicMock() mock_eph_info = mock.MagicMock() lookup_root_vhd = self._migrationops._pathutils.lookup_root_vhd_path side_effect = [mock_eph_info] if boot_from_volume else [mock_vhd_info, mock_eph_info] mock_ebs_root_in_block_devices = ( self._migrationops._volumeops.ebs_root_in_block_devices) mock_ebs_root_in_block_devices.return_value = boot_from_volume self._migrationops._vhdutils.get_vhd_info.side_effect = side_effect look_up_ephem = self._migrationops._pathutils.lookup_ephemeral_vhd_path look_up_ephem.return_value = ephemeral_path expected_check_resize = [] expected_get_info = [] self._migrationops.finish_migration( context=self.context, migration=mock.sentinel.migration, instance=mock_instance, disk_info=mock.sentinel.disk_info, network_info=mock.sentinel.network_info, image_meta=mock.sentinel.image_meta, 
resize_instance=True) mock_ebs_root_in_block_devices.assert_called_once_with(None) if not boot_from_volume: root_vhd_path = lookup_root_vhd.return_value lookup_root_vhd.assert_called_with(mock_instance.name) expected_get_info = [mock.call(root_vhd_path)] mock_vhd_info.get.assert_called_once_with("ParentPath") mock_check_base_disk.assert_called_once_with( self.context, mock_instance, root_vhd_path, mock_vhd_info.get.return_value) expected_check_resize.append( mock.call(root_vhd_path, mock_vhd_info, mock_instance.root_gb * units.Gi)) else: root_vhd_path = None look_up_ephem.assert_called_once_with(mock_instance.name) if ephemeral_path is None: create_eph_vhd = self._migrationops._vmops.create_ephemeral_vhd create_eph_vhd.assert_called_once_with(mock_instance) ephemeral_path = create_eph_vhd.return_value else: expected_get_info.append(mock.call(ephemeral_path)) expected_check_resize.append( mock.call(ephemeral_path, mock_eph_info, mock_instance.ephemeral_gb * units.Gi)) mock_check_resize_vhd.assert_has_calls(expected_check_resize) self._migrationops._vhdutils.get_vhd_info.assert_has_calls( expected_get_info) get_image_vm_gen = self._migrationops._vmops.get_image_vm_generation get_image_vm_gen.assert_called_once_with(mock_instance.uuid, root_vhd_path, mock.sentinel.image_meta) self._migrationops._vmops.create_instance.assert_called_once_with( mock_instance, mock.sentinel.network_info, None, root_vhd_path, ephemeral_path, get_image_vm_gen.return_value) mock_check_attach_config_drive.assert_called_once_with( mock_instance, get_image_vm_gen.return_value) self._migrationops._vmops.power_on.assert_called_once_with( mock_instance) def test_finish_migration(self): self._check_finish_migration( ephemeral_path=mock.sentinel.ephemeral_path) def test_finish_migration_boot_from_volume(self): self._check_finish_migration( ephemeral_path=mock.sentinel.ephemeral_path, boot_from_volume=True) def test_finish_migration_no_ephemeral(self): self._check_finish_migration() def test_finish_migration_no_root(self): mock_instance = fake_instance.fake_instance_obj(self.context) mock_ebs_root_in_block_devices = ( self._migrationops._volumeops.ebs_root_in_block_devices) mock_ebs_root_in_block_devices.return_value = False self._migrationops._pathutils.lookup_root_vhd_path.return_value = None self.assertRaises(exception.DiskNotFound, self._migrationops.finish_migration, self.context, mock.sentinel.migration, mock_instance, mock.sentinel.disk_info, mock.sentinel.network_info, mock.sentinel.image_meta, True, None, True) nova-13.1.4/nova/tests/unit/virt/hyperv/test_eventhandler.py0000664000567000056710000001461313064447152025374 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
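# The tests in this module cover InstanceEventHandler, which polls for
# Hyper-V VM state change events: _poll_events() waits on the WMI listener
# (sleeping for the configured polling interval on x_wmi_timed_out),
# _dispatch_event() resolves the instance uuid, and _emit_event() spawns the
# state-change and running-state callbacks passed in at construction time.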
import eventlet import mock from os_win import constants from os_win import exceptions as os_win_exc from os_win import utilsfactory from nova.tests.unit.virt.hyperv import test_base from nova import utils from nova.virt.hyperv import eventhandler class EventHandlerTestCase(test_base.HyperVBaseTestCase): _FAKE_POLLING_INTERVAL = 3 _FAKE_EVENT_CHECK_TIMEFRAME = 15 @mock.patch.object(utilsfactory, 'get_vmutils') def setUp(self, mock_get_vmutils): super(EventHandlerTestCase, self).setUp() self._state_change_callback = mock.Mock() self._running_state_callback = mock.Mock() self.flags( power_state_check_timeframe=self._FAKE_EVENT_CHECK_TIMEFRAME, group='hyperv') self.flags( power_state_event_polling_interval=self._FAKE_POLLING_INTERVAL, group='hyperv') self._event_handler = eventhandler.InstanceEventHandler( self._state_change_callback, self._running_state_callback) @mock.patch.object(eventhandler, 'wmi', create=True) @mock.patch.object(eventhandler.InstanceEventHandler, '_dispatch_event') @mock.patch.object(eventlet, 'sleep') def _test_poll_events(self, mock_sleep, mock_dispatch, mock_wmi, event_found=True): fake_listener = mock.Mock() mock_wmi.x_wmi_timed_out = Exception fake_listener.side_effect = (mock.sentinel.event if event_found else mock_wmi.x_wmi_timed_out, KeyboardInterrupt) self._event_handler._listener = fake_listener # This is supposed to run as a daemon, so we'll just cause an exception # in order to be able to test the method. self.assertRaises(KeyboardInterrupt, self._event_handler._poll_events) if event_found: mock_dispatch.assert_called_once_with(mock.sentinel.event) else: mock_sleep.assert_called_once_with(self._FAKE_POLLING_INTERVAL) def test_poll_having_events(self): # Test case in which events were found in the checked interval self._test_poll_events() def test_poll_no_event_found(self): self._test_poll_events(event_found=False) @mock.patch.object(eventhandler.InstanceEventHandler, '_get_instance_uuid') @mock.patch.object(eventhandler.InstanceEventHandler, '_emit_event') def _test_dispatch_event(self, mock_emit_event, mock_get_uuid, missing_uuid=False): mock_get_uuid.return_value = ( mock.sentinel.instance_uuid if not missing_uuid else None) self._event_handler._vmutils.get_vm_power_state.return_value = ( mock.sentinel.power_state) event = mock.Mock() event.ElementName = mock.sentinel.instance_name event.EnabledState = mock.sentinel.enabled_state self._event_handler._dispatch_event(event) if not missing_uuid: mock_emit_event.assert_called_once_with( mock.sentinel.instance_name, mock.sentinel.instance_uuid, mock.sentinel.power_state) else: self.assertFalse(mock_emit_event.called) def test_dispatch_event_new_final_state(self): self._test_dispatch_event() def test_dispatch_event_missing_uuid(self): self._test_dispatch_event(missing_uuid=True) @mock.patch.object(eventhandler.InstanceEventHandler, '_get_virt_event') @mock.patch.object(utils, 'spawn_n') def test_emit_event(self, mock_spawn, mock_get_event): self._event_handler._emit_event(mock.sentinel.instance_name, mock.sentinel.instance_uuid, constants.HYPERV_VM_STATE_ENABLED) virt_event = mock_get_event.return_value mock_spawn.assert_has_calls( [mock.call(self._state_change_callback, virt_event), mock.call(self._running_state_callback, mock.sentinel.instance_name, mock.sentinel.instance_uuid)]) def _test_get_instance_uuid(self, instance_found=True, missing_uuid=False): if instance_found: side_effect = (mock.sentinel.instance_uuid if not missing_uuid else None, ) else: side_effect = os_win_exc.HyperVVMNotFoundException( 
vm_name=mock.sentinel.instance_name) mock_get_uuid = self._event_handler._vmutils.get_instance_uuid mock_get_uuid.side_effect = side_effect instance_uuid = self._event_handler._get_instance_uuid( mock.sentinel.instance_name) expected_uuid = (mock.sentinel.instance_uuid if instance_found and not missing_uuid else None) self.assertEqual(expected_uuid, instance_uuid) def test_get_nova_created_instance_uuid(self): self._test_get_instance_uuid() def test_get_deleted_instance_uuid(self): self._test_get_instance_uuid(instance_found=False) def test_get_instance_uuid_missing_notes(self): self._test_get_instance_uuid(missing_uuid=True) @mock.patch('nova.virt.event.LifecycleEvent') def test_get_virt_event(self, mock_lifecycle_event): instance_state = constants.HYPERV_VM_STATE_ENABLED expected_transition = self._event_handler._TRANSITION_MAP[ instance_state] virt_event = self._event_handler._get_virt_event( mock.sentinel.instance_uuid, instance_state) self.assertEqual(mock_lifecycle_event.return_value, virt_event) mock_lifecycle_event.assert_called_once_with( uuid=mock.sentinel.instance_uuid, transition=expected_transition) nova-13.1.4/nova/tests/unit/virt/hyperv/test_livemigrationops.py0000664000567000056710000001324213064447152026305 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
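# The tests in this module cover LiveMigrationOps: live_migration() delegates
# to livemigrutils.live_migrate_vm(), copying the console logs and calling
# the post_method on success or the recover_method on failure, while the
# pre/post migration hooks handle cached images, volume connections and
# instance directory cleanup.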
import mock from os_win import exceptions as os_win_exc from oslo_config import cfg from nova.tests.unit import fake_instance from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import livemigrationops CONF = cfg.CONF class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V LiveMigrationOps class.""" def setUp(self): super(LiveMigrationOpsTestCase, self).setUp() self.context = 'fake_context' self._livemigrops = livemigrationops.LiveMigrationOps() self._livemigrops._livemigrutils = mock.MagicMock() self._livemigrops._pathutils = mock.MagicMock() @mock.patch('nova.virt.hyperv.vmops.VMOps.copy_vm_console_logs') @mock.patch('nova.virt.hyperv.vmops.VMOps.copy_vm_dvd_disks') def _test_live_migration(self, mock_get_vm_dvd_paths, mock_copy_logs, side_effect): mock_instance = fake_instance.fake_instance_obj(self.context) mock_post = mock.MagicMock() mock_recover = mock.MagicMock() fake_dest = mock.sentinel.DESTINATION self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [ side_effect] if side_effect is os_win_exc.HyperVException: self.assertRaises(os_win_exc.HyperVException, self._livemigrops.live_migration, self.context, mock_instance, fake_dest, mock_post, mock_recover, False, None) mock_recover.assert_called_once_with(self.context, mock_instance, fake_dest, False) else: self._livemigrops.live_migration(context=self.context, instance_ref=mock_instance, dest=fake_dest, post_method=mock_post, recover_method=mock_recover) mock_copy_logs.assert_called_once_with(mock_instance.name, fake_dest) mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm mock_live_migr.assert_called_once_with(mock_instance.name, fake_dest) mock_post.assert_called_once_with(self.context, mock_instance, fake_dest, False) def test_live_migration(self): self._test_live_migration(side_effect=None) def test_live_migration_exception(self): self._test_live_migration(side_effect=os_win_exc.HyperVException) @mock.patch('nova.virt.hyperv.volumeops.VolumeOps' '.ebs_root_in_block_devices') @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') @mock.patch('nova.virt.hyperv.volumeops.VolumeOps' '.initialize_volumes_connection') def test_pre_live_migration(self, mock_initialize_connection, mock_get_cached_image, mock_ebs_root_in_block_devices): mock_instance = fake_instance.fake_instance_obj(self.context) mock_instance.image_ref = "fake_image_ref" mock_ebs_root_in_block_devices.return_value = None CONF.set_override('use_cow_images', True) self._livemigrops.pre_live_migration( self.context, mock_instance, block_device_info=mock.sentinel.BLOCK_INFO, network_info=mock.sentinel.NET_INFO) check_config = ( self._livemigrops._livemigrutils.check_live_migration_config) check_config.assert_called_once_with() mock_ebs_root_in_block_devices.assert_called_once_with( mock.sentinel.BLOCK_INFO) mock_get_cached_image.assert_called_once_with(self.context, mock_instance) mock_initialize_connection.assert_called_once_with( mock.sentinel.BLOCK_INFO) @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes') def test_post_live_migration(self, mock_disconnect_volumes): self._livemigrops.post_live_migration( self.context, mock.sentinel.instance, mock.sentinel.block_device_info) mock_disconnect_volumes.assert_called_once_with( mock.sentinel.block_device_info) self._livemigrops._pathutils.get_instance_dir.assert_called_once_with( mock.sentinel.instance.name, create_dir=False, remove_dir=True) @mock.patch('nova.virt.hyperv.vmops.VMOps.log_vm_serial_output') def 
test_post_live_migration_at_destination(self, mock_log_vm): mock_instance = fake_instance.fake_instance_obj(self.context) self._livemigrops.post_live_migration_at_destination( self.context, mock_instance, network_info=mock.sentinel.NET_INFO, block_migration=mock.sentinel.BLOCK_INFO) mock_log_vm.assert_called_once_with(mock_instance.name, mock_instance.uuid) nova-13.1.4/nova/tests/unit/virt/hyperv/test_vif.py0000664000567000056710000000250013064447152023471 0ustar jenkinsjenkins00000000000000# Copyright 2015 Cloudbase Solutions Srl # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import vif class HyperVNovaNetworkVIFDriverTestCase(test_base.HyperVBaseTestCase): def setUp(self): super(HyperVNovaNetworkVIFDriverTestCase, self).setUp() self.vif_driver = vif.HyperVNovaNetworkVIFDriver() def test_plug(self): self.flags(vswitch_name=mock.sentinel.vswitch_name, group='hyperv') fake_vif = {'id': mock.sentinel.fake_id} self.vif_driver.plug(mock.sentinel.instance, fake_vif) netutils = self.vif_driver._netutils netutils.connect_vnic_to_vswitch.assert_called_once_with( mock.sentinel.vswitch_name, mock.sentinel.fake_id) nova-13.1.4/nova/tests/unit/virt/hyperv/test_imagecache.py0000664000567000056710000001522313064447152024761 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from oslo_config import cfg from oslo_utils import units from nova import exception from nova import objects from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_flavor from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import constants from nova.virt.hyperv import imagecache CONF = cfg.CONF class ImageCacheTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V ImageCache class.""" FAKE_BASE_DIR = 'fake/base/dir' FAKE_FORMAT = 'fake_format' FAKE_IMAGE_REF = 'fake_image_ref' FAKE_VHD_SIZE_GB = 1 def setUp(self): super(ImageCacheTestCase, self).setUp() self.context = 'fake-context' self.instance = fake_instance.fake_instance_obj(self.context) # utilsfactory will check the host OS version via get_hostutils, # in order to return the proper Utils Class, so it must be mocked. 
patched_get_hostutils = mock.patch.object(imagecache.utilsfactory, "get_hostutils") patched_get_vhdutils = mock.patch.object(imagecache.utilsfactory, "get_vhdutils") patched_get_hostutils.start() patched_get_vhdutils.start() self.addCleanup(patched_get_hostutils.stop) self.addCleanup(patched_get_vhdutils.stop) self.imagecache = imagecache.ImageCache() self.imagecache._pathutils = mock.MagicMock() self.imagecache._vhdutils = mock.MagicMock() def _test_get_root_vhd_size_gb(self, old_flavor=True): if old_flavor: mock_flavor = objects.Flavor(**test_flavor.fake_flavor) self.instance.old_flavor = mock_flavor else: self.instance.old_flavor = None return self.imagecache._get_root_vhd_size_gb(self.instance) def test_get_root_vhd_size_gb_old_flavor(self): ret_val = self._test_get_root_vhd_size_gb() self.assertEqual(test_flavor.fake_flavor['root_gb'], ret_val) def test_get_root_vhd_size_gb(self): ret_val = self._test_get_root_vhd_size_gb(old_flavor=False) self.assertEqual(self.instance.root_gb, ret_val) @mock.patch.object(imagecache.ImageCache, '_get_root_vhd_size_gb') def test_resize_and_cache_vhd_smaller(self, mock_get_vhd_size_gb): self.imagecache._vhdutils.get_vhd_size.return_value = { 'VirtualSize': (self.FAKE_VHD_SIZE_GB + 1) * units.Gi } mock_get_vhd_size_gb.return_value = self.FAKE_VHD_SIZE_GB mock_internal_vhd_size = ( self.imagecache._vhdutils.get_internal_vhd_size_by_file_size) mock_internal_vhd_size.return_value = self.FAKE_VHD_SIZE_GB * units.Gi self.assertRaises(exception.FlavorDiskSmallerThanImage, self.imagecache._resize_and_cache_vhd, mock.sentinel.instance, mock.sentinel.vhd_path) self.imagecache._vhdutils.get_vhd_size.assert_called_once_with( mock.sentinel.vhd_path) mock_get_vhd_size_gb.assert_called_once_with(mock.sentinel.instance) mock_internal_vhd_size.assert_called_once_with( mock.sentinel.vhd_path, self.FAKE_VHD_SIZE_GB * units.Gi) def _prepare_get_cached_image(self, path_exists, use_cow): self.instance.image_ref = self.FAKE_IMAGE_REF self.imagecache._pathutils.get_base_vhd_dir.return_value = ( self.FAKE_BASE_DIR) self.imagecache._pathutils.exists.return_value = path_exists self.imagecache._vhdutils.get_vhd_format.return_value = ( constants.DISK_FORMAT_VHD) CONF.set_override('use_cow_images', use_cow) expected_path = os.path.join(self.FAKE_BASE_DIR, self.FAKE_IMAGE_REF) expected_vhd_path = "%s.%s" % (expected_path, constants.DISK_FORMAT_VHD.lower()) return (expected_path, expected_vhd_path) @mock.patch.object(imagecache.images, 'fetch') def test_get_cached_image_with_fetch(self, mock_fetch): (expected_path, expected_vhd_path) = self._prepare_get_cached_image(False, False) result = self.imagecache.get_cached_image(self.context, self.instance) self.assertEqual(expected_vhd_path, result) mock_fetch.assert_called_once_with(self.context, self.FAKE_IMAGE_REF, expected_path, self.instance['user_id'], self.instance['project_id']) self.imagecache._vhdutils.get_vhd_format.assert_called_once_with( expected_path) self.imagecache._pathutils.rename.assert_called_once_with( expected_path, expected_vhd_path) @mock.patch.object(imagecache.images, 'fetch') def test_get_cached_image_with_fetch_exception(self, mock_fetch): (expected_path, expected_vhd_path) = self._prepare_get_cached_image(False, False) # path doesn't exist until fetched. 
self.imagecache._pathutils.exists.side_effect = [False, False, True] mock_fetch.side_effect = exception.InvalidImageRef( image_href=self.FAKE_IMAGE_REF) self.assertRaises(exception.InvalidImageRef, self.imagecache.get_cached_image, self.context, self.instance) self.imagecache._pathutils.remove.assert_called_once_with( expected_path) @mock.patch.object(imagecache.ImageCache, '_resize_and_cache_vhd') def test_get_cached_image_use_cow(self, mock_resize): (expected_path, expected_vhd_path) = self._prepare_get_cached_image(True, True) expected_resized_vhd_path = expected_vhd_path + 'x' mock_resize.return_value = expected_resized_vhd_path result = self.imagecache.get_cached_image(self.context, self.instance) self.assertEqual(expected_resized_vhd_path, result) mock_resize.assert_called_once_with(self.instance, expected_vhd_path) nova-13.1.4/nova/tests/unit/virt/hyperv/test_volumeops.py0000664000567000056710000006371613064447152024756 0ustar jenkinsjenkins00000000000000# Copyright 2014 Cloudbase Solutions Srl # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from os_win import exceptions as os_win_exc from oslo_config import cfg from nova import exception from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit.virt.hyperv import test_base from nova.virt.hyperv import volumeops CONF = cfg.CONF connection_data = {'volume_id': 'fake_vol_id', 'target_lun': mock.sentinel.fake_lun, 'target_iqn': mock.sentinel.fake_iqn, 'target_portal': mock.sentinel.fake_portal, 'auth_method': 'chap', 'auth_username': mock.sentinel.fake_user, 'auth_password': mock.sentinel.fake_pass} def get_fake_block_dev_info(): return {'block_device_mapping': [ fake_block_device.AnonFakeDbBlockDeviceDict({'source_type': 'volume'})] } def get_fake_connection_info(**kwargs): return {'data': dict(connection_data, **kwargs), 'serial': mock.sentinel.serial} class VolumeOpsTestCase(test_base.HyperVBaseTestCase): """Unit tests for VolumeOps class.""" def setUp(self): super(VolumeOpsTestCase, self).setUp() self._volumeops = volumeops.VolumeOps() self._volumeops._volutils = mock.MagicMock() self._volumeops._vmutils = mock.Mock() def test_get_volume_driver(self): fake_conn_info = {'driver_volume_type': mock.sentinel.fake_driver_type} self._volumeops.volume_drivers[mock.sentinel.fake_driver_type] = ( mock.sentinel.fake_driver) result = self._volumeops._get_volume_driver( connection_info=fake_conn_info) self.assertEqual(mock.sentinel.fake_driver, result) def test_get_volume_driver_exception(self): fake_conn_info = {'driver_volume_type': 'fake_driver'} self.assertRaises(exception.VolumeDriverNotFound, self._volumeops._get_volume_driver, connection_info=fake_conn_info) @mock.patch.object(volumeops.VolumeOps, 'attach_volume') def test_attach_volumes(self, mock_attach_volume): block_device_info = get_fake_block_dev_info() self._volumeops.attach_volumes(block_device_info, mock.sentinel.instance_name, ebs_root=True) mock_attach_volume.assert_called_once_with( 
block_device_info['block_device_mapping'][0]['connection_info'], mock.sentinel.instance_name, True) def test_fix_instance_volume_disk_paths_empty_bdm(self): self._volumeops.fix_instance_volume_disk_paths( mock.sentinel.instance_name, block_device_info={}) self.assertFalse( self._volumeops._vmutils.get_vm_physical_disk_mapping.called) @mock.patch.object(volumeops.VolumeOps, 'get_disk_path_mapping') def test_fix_instance_volume_disk_paths(self, mock_get_disk_path_mapping): block_device_info = get_fake_block_dev_info() mock_disk1 = { 'mounted_disk_path': mock.sentinel.mounted_disk1_path, 'resource_path': mock.sentinel.resource1_path } mock_disk2 = { 'mounted_disk_path': mock.sentinel.mounted_disk2_path, 'resource_path': mock.sentinel.resource2_path } mock_vm_disk_mapping = { mock.sentinel.disk1_serial: mock_disk1, mock.sentinel.disk2_serial: mock_disk2 } # In this case, only the first disk needs to be updated. mock_phys_disk_path_mapping = { mock.sentinel.disk1_serial: mock.sentinel.actual_disk1_path, mock.sentinel.disk2_serial: mock.sentinel.mounted_disk2_path } vmutils = self._volumeops._vmutils vmutils.get_vm_physical_disk_mapping.return_value = ( mock_vm_disk_mapping) mock_get_disk_path_mapping.return_value = mock_phys_disk_path_mapping self._volumeops.fix_instance_volume_disk_paths( mock.sentinel.instance_name, block_device_info) vmutils.get_vm_physical_disk_mapping.assert_called_once_with( mock.sentinel.instance_name) mock_get_disk_path_mapping.assert_called_once_with( block_device_info) vmutils.set_disk_host_res.assert_called_once_with( mock.sentinel.resource1_path, mock.sentinel.actual_disk1_path) @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') def test_disconnect_volumes(self, mock_get_volume_driver): block_device_info = get_fake_block_dev_info() block_device_mapping = block_device_info['block_device_mapping'] block_device_mapping[0]['connection_info'] = { 'driver_volume_type': mock.sentinel.fake_vol_type} fake_volume_driver = mock_get_volume_driver.return_value self._volumeops.disconnect_volumes(block_device_info) fake_volume_driver.disconnect_volumes.assert_called_once_with( block_device_mapping) @mock.patch('nova.block_device.volume_in_mapping') def test_ebs_root_in_block_devices(self, mock_vol_in_mapping): block_device_info = get_fake_block_dev_info() response = self._volumeops.ebs_root_in_block_devices(block_device_info) mock_vol_in_mapping.assert_called_once_with( self._volumeops._default_root_device, block_device_info) self.assertEqual(mock_vol_in_mapping.return_value, response) def test_get_volume_connector(self): mock_instance = mock.DEFAULT initiator = self._volumeops._volutils.get_iscsi_initiator.return_value expected = {'ip': CONF.my_ip, 'host': CONF.host, 'initiator': initiator} response = self._volumeops.get_volume_connector(instance=mock_instance) self._volumeops._volutils.get_iscsi_initiator.assert_called_once_with() self.assertEqual(expected, response) @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') def test_initialize_volumes_connection(self, mock_get_volume_driver): block_device_info = get_fake_block_dev_info() self._volumeops.initialize_volumes_connection(block_device_info) init_vol_conn = ( mock_get_volume_driver.return_value.initialize_volume_connection) init_vol_conn.assert_called_once_with( block_device_info['block_device_mapping'][0]['connection_info']) @mock.patch.object(volumeops.VolumeOps, 'get_mounted_disk_path_from_volume') def test_get_disk_path_mapping(self, mock_get_disk_path): block_device_info = get_fake_block_dev_info() 
block_device_mapping = block_device_info['block_device_mapping'] fake_conn_info = get_fake_connection_info() block_device_mapping[0]['connection_info'] = fake_conn_info mock_get_disk_path.return_value = mock.sentinel.disk_path resulted_disk_path_mapping = self._volumeops.get_disk_path_mapping( block_device_info) mock_get_disk_path.assert_called_once_with(fake_conn_info) expected_disk_path_mapping = { mock.sentinel.serial: mock.sentinel.disk_path } self.assertEqual(expected_disk_path_mapping, resulted_disk_path_mapping) def test_group_block_devices_by_type(self): block_device_map = get_fake_block_dev_info()['block_device_mapping'] block_device_map[0]['connection_info'] = { 'driver_volume_type': 'iscsi'} result = self._volumeops._group_block_devices_by_type( block_device_map) expected = {'iscsi': [block_device_map[0]]} self.assertEqual(expected, result) @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') def test_get_mounted_disk_path_from_volume(self, mock_get_volume_driver): fake_conn_info = get_fake_connection_info() fake_volume_driver = mock_get_volume_driver.return_value resulted_disk_path = self._volumeops.get_mounted_disk_path_from_volume( fake_conn_info) mock_get_volume_driver.assert_called_once_with( connection_info=fake_conn_info) get_mounted_disk = fake_volume_driver.get_mounted_disk_path_from_volume get_mounted_disk.assert_called_once_with(fake_conn_info) self.assertEqual(get_mounted_disk.return_value, resulted_disk_path) class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase): """Unit tests for Hyper-V ISCSIVolumeDriver class.""" def setUp(self): super(ISCSIVolumeDriverTestCase, self).setUp() self._volume_driver = volumeops.ISCSIVolumeDriver() self._volume_driver._vmutils = mock.MagicMock() self._volume_driver._volutils = mock.MagicMock() def test_login_storage_target_auth_exception(self): connection_info = get_fake_connection_info( auth_method='fake_auth_method') self.assertRaises(exception.UnsupportedBDMVolumeAuthMethod, self._volume_driver.login_storage_target, connection_info) @mock.patch.object(volumeops.ISCSIVolumeDriver, '_get_mounted_disk_from_lun') def _check_login_storage_target(self, mock_get_mounted_disk_from_lun, dev_number): connection_info = get_fake_connection_info() login_target = self._volume_driver._volutils.login_storage_target get_number = self._volume_driver._volutils.get_device_number_for_target get_number.return_value = dev_number self._volume_driver.login_storage_target(connection_info) get_number.assert_called_once_with(mock.sentinel.fake_iqn, mock.sentinel.fake_lun) if not dev_number: login_target.assert_called_once_with( mock.sentinel.fake_lun, mock.sentinel.fake_iqn, mock.sentinel.fake_portal, mock.sentinel.fake_user, mock.sentinel.fake_pass) mock_get_mounted_disk_from_lun.assert_called_once_with( mock.sentinel.fake_iqn, mock.sentinel.fake_lun, True) else: self.assertFalse(login_target.called) def test_login_storage_target_already_logged(self): self._check_login_storage_target(dev_number=1) def test_login_storage_target(self): self._check_login_storage_target(dev_number=0) def _check_logout_storage_target(self, disconnected_luns_count=0): self._volume_driver._volutils.get_target_lun_count.return_value = 1 self._volume_driver.logout_storage_target( target_iqn=mock.sentinel.fake_iqn, disconnected_luns_count=disconnected_luns_count) logout_storage = self._volume_driver._volutils.logout_storage_target if disconnected_luns_count: logout_storage.assert_called_once_with(mock.sentinel.fake_iqn) else: self.assertFalse(logout_storage.called) 
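# NOTE(editor): the thin test_* methods below delegate to the
# _check_logout_storage_target() helper above with different arguments, so
# both branches (skipping the logout vs. actually calling
# logout_storage_target) are covered without duplicating the setup.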
def test_logout_storage_target_skip(self): self._check_logout_storage_target() def test_logout_storage_target(self): self._check_logout_storage_target(disconnected_luns_count=1) @mock.patch.object(volumeops.ISCSIVolumeDriver, '_get_mounted_disk_from_lun') def test_get_mounted_disk_path_from_volume(self, mock_get_mounted_disk_from_lun): connection_info = get_fake_connection_info() resulted_disk_path = ( self._volume_driver.get_mounted_disk_path_from_volume( connection_info)) mock_get_mounted_disk_from_lun.assert_called_once_with( connection_info['data']['target_iqn'], connection_info['data']['target_lun'], wait_for_device=True) self.assertEqual(mock_get_mounted_disk_from_lun.return_value, resulted_disk_path) @mock.patch.object(volumeops.ISCSIVolumeDriver, '_get_mounted_disk_from_lun') @mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target') @mock.patch.object(volumeops.ISCSIVolumeDriver, 'login_storage_target') def test_attach_volume_exception(self, mock_login_storage_target, mock_logout_storage_target, mock_get_mounted_disk): connection_info = get_fake_connection_info() mock_get_mounted_disk.side_effect = os_win_exc.HyperVException self.assertRaises(os_win_exc.HyperVException, self._volume_driver.attach_volume, connection_info, mock.sentinel.instance_name) mock_logout_storage_target.assert_called_with(mock.sentinel.fake_iqn) @mock.patch.object(volumeops.ISCSIVolumeDriver, '_get_mounted_disk_from_lun') @mock.patch.object(volumeops.ISCSIVolumeDriver, 'login_storage_target') def _check_attach_volume(self, mock_login_storage_target, mock_get_mounted_disk_from_lun, ebs_root): connection_info = get_fake_connection_info() get_ide_path = self._volume_driver._vmutils.get_vm_ide_controller get_scsi_path = self._volume_driver._vmutils.get_vm_scsi_controller fake_ide_path = get_ide_path.return_value fake_scsi_path = get_scsi_path.return_value fake_mounted_disk_path = mock_get_mounted_disk_from_lun.return_value attach_vol = self._volume_driver._vmutils.attach_volume_to_controller get_free_slot = self._volume_driver._vmutils.get_free_controller_slot get_free_slot.return_value = 1 self._volume_driver.attach_volume( connection_info=connection_info, instance_name=mock.sentinel.instance_name, ebs_root=ebs_root) mock_login_storage_target.assert_called_once_with(connection_info) mock_get_mounted_disk_from_lun.assert_called_once_with( mock.sentinel.fake_iqn, mock.sentinel.fake_lun, wait_for_device=True) if ebs_root: get_ide_path.assert_called_once_with( mock.sentinel.instance_name, 0) attach_vol.assert_called_once_with(mock.sentinel.instance_name, fake_ide_path, 0, fake_mounted_disk_path, serial=mock.sentinel.serial) else: get_scsi_path.assert_called_once_with(mock.sentinel.instance_name) get_free_slot.assert_called_once_with(fake_scsi_path) attach_vol.assert_called_once_with(mock.sentinel.instance_name, fake_scsi_path, 1, fake_mounted_disk_path, serial=mock.sentinel.serial) def test_attach_volume_ebs(self): self._check_attach_volume(ebs_root=True) def test_attach_volume(self): self._check_attach_volume(ebs_root=False) @mock.patch.object(volumeops.ISCSIVolumeDriver, '_get_mounted_disk_from_lun') @mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target') def test_detach_volume(self, mock_logout_storage_target, mock_get_mounted_disk_from_lun): connection_info = get_fake_connection_info() self._volume_driver.detach_volume(connection_info, mock.sentinel.instance_name) mock_get_mounted_disk_from_lun.assert_called_once_with( mock.sentinel.fake_iqn, mock.sentinel.fake_lun, 
wait_for_device=True) self._volume_driver._vmutils.detach_vm_disk.assert_called_once_with( mock.sentinel.instance_name, mock_get_mounted_disk_from_lun.return_value) mock_logout_storage_target.assert_called_once_with( mock.sentinel.fake_iqn) def test_get_mounted_disk_from_lun(self): with test.nested( mock.patch.object(self._volume_driver._volutils, 'get_device_number_for_target'), mock.patch.object(self._volume_driver._vmutils, 'get_mounted_disk_by_drive_number') ) as (mock_get_device_number_for_target, mock_get_mounted_disk_by_drive_number): mock_get_device_number_for_target.return_value = 0 mock_get_mounted_disk_by_drive_number.return_value = ( mock.sentinel.disk_path) disk = self._volume_driver._get_mounted_disk_from_lun( mock.sentinel.target_iqn, mock.sentinel.target_lun) self.assertEqual(mock.sentinel.disk_path, disk) def test_get_target_from_disk_path(self): result = self._volume_driver.get_target_from_disk_path( mock.sentinel.physical_drive_path) mock_get_target = ( self._volume_driver._volutils.get_target_from_disk_path) mock_get_target.assert_called_once_with( mock.sentinel.physical_drive_path) self.assertEqual(mock_get_target.return_value, result) @mock.patch('time.sleep') def test_get_mounted_disk_from_lun_failure(self, fake_sleep): self.flags(mounted_disk_query_retry_count=1, group='hyperv') with mock.patch.object(self._volume_driver._volutils, 'get_device_number_for_target') as m_device_num: m_device_num.side_effect = [None, -1] self.assertRaises(exception.NotFound, self._volume_driver._get_mounted_disk_from_lun, mock.sentinel.target_iqn, mock.sentinel.target_lun) @mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target') def test_disconnect_volumes(self, mock_logout_storage_target): block_device_info = get_fake_block_dev_info() connection_info = get_fake_connection_info() block_device_mapping = block_device_info['block_device_mapping'] block_device_mapping[0]['connection_info'] = connection_info self._volume_driver.disconnect_volumes(block_device_mapping) mock_logout_storage_target.assert_called_once_with( mock.sentinel.fake_iqn, 1) def test_get_target_lun_count(self): result = self._volume_driver.get_target_lun_count( mock.sentinel.target_iqn) mock_get_lun_count = self._volume_driver._volutils.get_target_lun_count mock_get_lun_count.assert_called_once_with(mock.sentinel.target_iqn) self.assertEqual(mock_get_lun_count.return_value, result) @mock.patch.object(volumeops.ISCSIVolumeDriver, 'login_storage_target') def test_initialize_volume_connection(self, mock_login_storage_target): self._volume_driver.initialize_volume_connection( mock.sentinel.connection_info) mock_login_storage_target.assert_called_once_with( mock.sentinel.connection_info) class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase): """Unit tests for the Hyper-V SMBFSVolumeDriver class.""" _FAKE_SHARE = '//1.2.3.4/fake_share' _FAKE_SHARE_NORMALIZED = _FAKE_SHARE.replace('/', '\\') _FAKE_DISK_NAME = 'fake_volume_name.vhdx' _FAKE_USERNAME = 'fake_username' _FAKE_PASSWORD = 'fake_password' _FAKE_SMB_OPTIONS = '-o username=%s,password=%s' % (_FAKE_USERNAME, _FAKE_PASSWORD) _FAKE_CONNECTION_INFO = {'data': {'export': _FAKE_SHARE, 'name': _FAKE_DISK_NAME, 'options': _FAKE_SMB_OPTIONS, 'volume_id': 'fake_vol_id'}} def setUp(self): super(SMBFSVolumeDriverTestCase, self).setUp() self._volume_driver = volumeops.SMBFSVolumeDriver() self._volume_driver._vmutils = mock.MagicMock() self._volume_driver._smbutils = mock.MagicMock() self._volume_driver._volutils = mock.MagicMock() 
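# NOTE(editor): throughout this class the stacked @mock.patch.object
# decorators are applied bottom-up, so the lowest decorator supplies the
# first injected mock argument. For example, in _check_attach_volume below,
# mock_get_disk_path corresponds to the '_get_disk_path' patch and
# mock_ensure_share_mounted to the 'ensure_share_mounted' patch.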
@mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path') def test_get_mounted_disk_path_from_volume(self, mock_get_disk_path): disk_path = self._volume_driver.get_mounted_disk_path_from_volume( mock.sentinel.conn_info) self.assertEqual(mock_get_disk_path.return_value, disk_path) mock_get_disk_path.assert_called_once_with(mock.sentinel.conn_info) @mock.patch.object(volumeops.SMBFSVolumeDriver, 'ensure_share_mounted') @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path') def _check_attach_volume(self, mock_get_disk_path, mock_ensure_share_mounted, ebs_root=False): mock_get_disk_path.return_value = mock.sentinel.disk_path self._volume_driver.attach_volume( self._FAKE_CONNECTION_INFO, mock.sentinel.instance_name, ebs_root) if ebs_root: get_vm_ide_controller = ( self._volume_driver._vmutils.get_vm_ide_controller) get_vm_ide_controller.assert_called_once_with( mock.sentinel.instance_name, 0) ctrller_path = get_vm_ide_controller.return_value slot = 0 else: get_vm_scsi_controller = ( self._volume_driver._vmutils.get_vm_scsi_controller) get_vm_scsi_controller.assert_called_once_with( mock.sentinel.instance_name) get_free_controller_slot = ( self._volume_driver._vmutils.get_free_controller_slot) get_free_controller_slot.assert_called_once_with( get_vm_scsi_controller.return_value) ctrller_path = get_vm_scsi_controller.return_value slot = get_free_controller_slot.return_value mock_ensure_share_mounted.assert_called_once_with( self._FAKE_CONNECTION_INFO) mock_get_disk_path.assert_called_once_with(self._FAKE_CONNECTION_INFO) self._volume_driver._vmutils.attach_drive.assert_called_once_with( mock.sentinel.instance_name, mock.sentinel.disk_path, ctrller_path, slot) def test_attach_volume_ide(self): self._check_attach_volume(ebs_root=True) def test_attach_volume_scsi(self): self._check_attach_volume() @mock.patch.object(volumeops.SMBFSVolumeDriver, 'ensure_share_mounted') @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path') def test_attach_non_existing_image(self, mock_get_disk_path, mock_ensure_share_mounted): self._volume_driver._vmutils.attach_drive.side_effect = ( os_win_exc.HyperVException) self.assertRaises(exception.VolumeAttachFailed, self._volume_driver.attach_volume, self._FAKE_CONNECTION_INFO, mock.sentinel.instance_name) @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path') def test_detach_volume(self, mock_get_disk_path): mock_get_disk_path.return_value = ( mock.sentinel.disk_path) self._volume_driver.detach_volume(self._FAKE_CONNECTION_INFO, mock.sentinel.instance_name) self._volume_driver._vmutils.detach_vm_disk.assert_called_once_with( mock.sentinel.instance_name, mock.sentinel.disk_path, is_physical=False) def test_parse_credentials(self): username, password = self._volume_driver._parse_credentials( self._FAKE_SMB_OPTIONS) self.assertEqual(self._FAKE_USERNAME, username) self.assertEqual(self._FAKE_PASSWORD, password) def test_get_export_path(self): result = self._volume_driver._get_export_path( self._FAKE_CONNECTION_INFO) expected = self._FAKE_SHARE.replace('/', '\\') self.assertEqual(expected, result) def test_get_disk_path(self): expected = os.path.join(self._FAKE_SHARE_NORMALIZED, self._FAKE_DISK_NAME) disk_path = self._volume_driver._get_disk_path( self._FAKE_CONNECTION_INFO) self.assertEqual(expected, disk_path) @mock.patch.object(volumeops.SMBFSVolumeDriver, '_parse_credentials') def _test_ensure_mounted(self, mock_parse_credentials, is_mounted=False): mock_mount_smb_share = self._volume_driver._smbutils.mount_smb_share 
self._volume_driver._smbutils.check_smb_mapping.return_value = ( is_mounted) mock_parse_credentials.return_value = ( self._FAKE_USERNAME, self._FAKE_PASSWORD) self._volume_driver.ensure_share_mounted( self._FAKE_CONNECTION_INFO) if is_mounted: self.assertFalse( mock_mount_smb_share.called) else: mock_mount_smb_share.assert_called_once_with( self._FAKE_SHARE_NORMALIZED, username=self._FAKE_USERNAME, password=self._FAKE_PASSWORD) def test_ensure_mounted_new_share(self): self._test_ensure_mounted() def test_ensure_already_mounted(self): self._test_ensure_mounted(is_mounted=True) def test_disconnect_volumes(self): block_device_mapping = [ {'connection_info': self._FAKE_CONNECTION_INFO}] self._volume_driver.disconnect_volumes(block_device_mapping) mock_unmount_share = self._volume_driver._smbutils.unmount_smb_share mock_unmount_share.assert_called_once_with( self._FAKE_SHARE_NORMALIZED) nova-13.1.4/nova/tests/unit/virt/test_diagnostics.py0000664000567000056710000002413713064447140023706 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import exception from nova import test from nova.virt import diagnostics class DiagnosticsTests(test.NoDBTestCase): def test_cpu_diagnostics_default(self): cpu = diagnostics.CpuDiagnostics() self.assertEqual(0, cpu.time) def test_cpu_diagnostics(self): cpu = diagnostics.CpuDiagnostics(time=7) self.assertEqual(7, cpu.time) def test_nic_diagnostics_default(self): nic = diagnostics.NicDiagnostics() self.assertEqual('00:00:00:00:00:00', nic.mac_address) self.assertEqual(0, nic.rx_octets) self.assertEqual(0, nic.rx_errors) self.assertEqual(0, nic.rx_drop) self.assertEqual(0, nic.rx_packets) self.assertEqual(0, nic.tx_octets) self.assertEqual(0, nic.tx_errors) self.assertEqual(0, nic.tx_drop) self.assertEqual(0, nic.tx_packets) def test_nic_diagnostics(self): nic = diagnostics.NicDiagnostics(mac_address='00:00:ca:fe:00:00', rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4, tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8) self.assertEqual('00:00:ca:fe:00:00', nic.mac_address) self.assertEqual(1, nic.rx_octets) self.assertEqual(2, nic.rx_errors) self.assertEqual(3, nic.rx_drop) self.assertEqual(4, nic.rx_packets) self.assertEqual(5, nic.tx_octets) self.assertEqual(6, nic.tx_errors) self.assertEqual(7, nic.tx_drop) self.assertEqual(8, nic.tx_packets) def test_disk_diagnostics_default(self): disk = diagnostics.DiskDiagnostics() self.assertEqual('', disk.id) self.assertEqual(0, disk.read_bytes) self.assertEqual(0, disk.read_requests) self.assertEqual(0, disk.write_bytes) self.assertEqual(0, disk.write_requests) self.assertEqual(0, disk.errors_count) def test_disk_diagnostics(self): disk = diagnostics.DiskDiagnostics(id='fake_disk_id', read_bytes=1, read_requests=2, write_bytes=3, write_requests=4, errors_count=5) self.assertEqual('fake_disk_id', disk.id) self.assertEqual(1, disk.read_bytes) self.assertEqual(2, disk.read_requests) self.assertEqual(3, disk.write_bytes) self.assertEqual(4, disk.write_requests) 
self.assertEqual(5, disk.errors_count) def test_memory_diagnostics_default(self): memory = diagnostics.MemoryDiagnostics() self.assertEqual(0, memory.maximum) self.assertEqual(0, memory.used) def test_memory_diagnostics(self): memory = diagnostics.MemoryDiagnostics(maximum=1, used=2) self.assertEqual(1, memory.maximum) self.assertEqual(2, memory.used) def test_diagnostics_default(self): diags = diagnostics.Diagnostics() self.assertIsNone(diags.state) self.assertIsNone(diags.driver) self.assertIsNone(diags.hypervisor_os) self.assertEqual(0, diags.uptime) self.assertFalse(diags.config_drive) self.assertEqual([], diags.cpu_details) self.assertEqual([], diags.nic_details) self.assertEqual([], diags.disk_details) self.assertEqual(0, diags.memory_details.maximum) self.assertEqual(0, diags.memory_details.used) self.assertEqual('1.0', diags.version) def test_diagnostics(self): cpu_details = [diagnostics.CpuDiagnostics()] nic_details = [diagnostics.NicDiagnostics()] disk_details = [diagnostics.DiskDiagnostics()] diags = diagnostics.Diagnostics( state='fake-state', driver='fake-driver', hypervisor_os='fake-os', uptime=1, cpu_details=cpu_details, nic_details=nic_details, disk_details=disk_details, config_drive=True) self.assertEqual('fake-state', diags.state) self.assertEqual('fake-driver', diags.driver) self.assertEqual('fake-os', diags.hypervisor_os) self.assertEqual(1, diags.uptime) self.assertTrue(diags.config_drive) self.assertEqual(1, len(diags.cpu_details)) self.assertEqual(1, len(diags.nic_details)) self.assertEqual(1, len(diags.disk_details)) self.assertEqual(0, diags.memory_details.maximum) self.assertEqual(0, diags.memory_details.used) self.assertEqual('1.0', diags.version) def test_add_cpu(self): diags = diagnostics.Diagnostics() self.assertEqual([], diags.cpu_details) diags.add_cpu(time=7) self.assertEqual(1, len(diags.cpu_details)) self.assertEqual(7, diags.cpu_details[0].time) def test_add_nic(self): diags = diagnostics.Diagnostics() self.assertEqual([], diags.nic_details) diags.add_nic(mac_address='00:00:ca:fe:00:00', rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4, tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8) self.assertEqual(1, len(diags.nic_details)) self.assertEqual('00:00:ca:fe:00:00', diags.nic_details[0].mac_address) self.assertEqual(1, diags.nic_details[0].rx_octets) self.assertEqual(2, diags.nic_details[0].rx_errors) self.assertEqual(3, diags.nic_details[0].rx_drop) self.assertEqual(4, diags.nic_details[0].rx_packets) self.assertEqual(5, diags.nic_details[0].tx_octets) self.assertEqual(6, diags.nic_details[0].tx_errors) self.assertEqual(7, diags.nic_details[0].tx_drop) self.assertEqual(8, diags.nic_details[0].tx_packets) def test_add_disk(self): diags = diagnostics.Diagnostics() self.assertEqual([], diags.disk_details) diags.add_disk(id='fake_disk_id', read_bytes=1, read_requests=2, write_bytes=3, write_requests=4, errors_count=5) self.assertEqual(1, len(diags.disk_details)) self.assertEqual('fake_disk_id', diags.disk_details[0].id) self.assertEqual(1, diags.disk_details[0].read_bytes) self.assertEqual(2, diags.disk_details[0].read_requests) self.assertEqual(3, diags.disk_details[0].write_bytes) self.assertEqual(4, diags.disk_details[0].write_requests) self.assertEqual(5, diags.disk_details[0].errors_count) def test_diagnostics_serialize_default(self): diags = diagnostics.Diagnostics() expected = {'config_drive': False, 'cpu_details': [], 'disk_details': [], 'driver': None, 'hypervisor_os': None, 'memory_details': {'maximum': 0, 'used': 0}, 'nic_details': [], 'state': 
None, 'uptime': 0, 'version': '1.0'} result = diags.serialize() self.assertEqual(expected, result) def test_diagnostics_serialize(self): cpu_details = [diagnostics.CpuDiagnostics()] nic_details = [diagnostics.NicDiagnostics()] disk_details = [diagnostics.DiskDiagnostics()] diags = diagnostics.Diagnostics( state='fake-state', driver='fake-driver', hypervisor_os='fake-os', uptime=1, cpu_details=cpu_details, nic_details=nic_details, disk_details=disk_details, config_drive=True) expected = {'config_drive': True, 'cpu_details': [{'time': 0}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 0, 'read_requests': 0, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'fake-driver', 'hypervisor_os': 'fake-os', 'memory_details': {'maximum': 0, 'used': 0}, 'nic_details': [{'mac_address': '00:00:00:00:00:00', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 0, 'rx_packets': 0, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'fake-state', 'uptime': 1, 'version': '1.0'} result = diags.serialize() self.assertEqual(expected, result) def test_diagnostics_invalid_input(self): self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, cpu_details='invalid type') self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, cpu_details=['invalid entry']) self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, nic_details='invalid type') self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, nic_details=['invalid entry']) self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, disk_details='invalid type') self.assertRaises(exception.InvalidInput, diagnostics.Diagnostics, disk_details=['invalid entry']) nova-13.1.4/nova/tests/unit/virt/xenapi/0000775000567000056710000000000013064447471021252 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/test_agent.py0000664000567000056710000004240213064447152023757 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
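# NOTE(editor): editorial overview of this module (test_agent.py). The tests
# below exercise the XenAPI guest agent support: the should_use_agent()
# image-property/flag logic, the _get_sys_meta_key() helpers, SSH key and
# file injection (including the skip flags), the reboot-retry behaviour of
# _call_agent() and _wait_for_new_dom_id(), the SimpleDH-based
# set_admin_password() key exchange, agent version comparison, and the
# module-level _call_agent() plugin invocation.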
import base64 import time import uuid import mock from nova import exception from nova import test from nova.virt.xenapi import agent from nova.virt.xenapi import fake as xenapi_fake def _get_fake_instance(**kwargs): system_metadata = [] for k, v in kwargs.items(): system_metadata.append({ "key": k, "value": v }) return { "system_metadata": system_metadata, "uuid": "uuid", "key_data": "ssh-rsa asdf", "os_type": "asdf", } class AgentTestCaseBase(test.NoDBTestCase): def _create_agent(self, instance, session="session"): self.session = session self.virtapi = "virtapi" self.vm_ref = "vm_ref" return agent.XenAPIBasedAgent(self.session, self.virtapi, instance, self.vm_ref) class AgentImageFlagsTestCase(AgentTestCaseBase): def test_agent_is_present(self): self.flags(use_agent_default=False, group='xenserver') instance = {"system_metadata": [{"key": "image_xenapi_use_agent", "value": "true"}]} self.assertTrue(agent.should_use_agent(instance)) def test_agent_is_disabled(self): self.flags(use_agent_default=True, group='xenserver') instance = {"system_metadata": [{"key": "image_xenapi_use_agent", "value": "false"}]} self.assertFalse(agent.should_use_agent(instance)) def test_agent_uses_deafault_when_prop_invalid(self): self.flags(use_agent_default=True, group='xenserver') instance = {"system_metadata": [{"key": "image_xenapi_use_agent", "value": "bob"}], "uuid": "uuid"} self.assertTrue(agent.should_use_agent(instance)) def test_agent_default_not_present(self): self.flags(use_agent_default=False, group='xenserver') instance = {"system_metadata": []} self.assertFalse(agent.should_use_agent(instance)) def test_agent_default_present(self): self.flags(use_agent_default=True, group='xenserver') instance = {"system_metadata": []} self.assertTrue(agent.should_use_agent(instance)) class SysMetaKeyTestBase(object): key = None def _create_agent_with_value(self, value): kwargs = {self.key: value} instance = _get_fake_instance(**kwargs) return self._create_agent(instance) def test_get_sys_meta_key_true(self): agent = self._create_agent_with_value("true") self.assertTrue(agent._get_sys_meta_key(self.key)) def test_get_sys_meta_key_false(self): agent = self._create_agent_with_value("False") self.assertFalse(agent._get_sys_meta_key(self.key)) def test_get_sys_meta_key_invalid_is_false(self): agent = self._create_agent_with_value("invalid") self.assertFalse(agent._get_sys_meta_key(self.key)) def test_get_sys_meta_key_missing_is_false(self): instance = _get_fake_instance() agent = self._create_agent(instance) self.assertFalse(agent._get_sys_meta_key(self.key)) class SkipSshFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase): key = "image_xenapi_skip_agent_inject_ssh" def test_skip_ssh_key_inject(self): agent = self._create_agent_with_value("True") self.assertTrue(agent._skip_ssh_key_inject()) class SkipFileInjectAtBootFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase): key = "image_xenapi_skip_agent_inject_files_at_boot" def test_skip_inject_files_at_boot(self): agent = self._create_agent_with_value("True") self.assertTrue(agent._skip_inject_files_at_boot()) class InjectSshTestCase(AgentTestCaseBase): def test_inject_ssh_key_succeeds(self): instance = _get_fake_instance() agent = self._create_agent(instance) self.mox.StubOutWithMock(agent, "inject_file") agent.inject_file("/root/.ssh/authorized_keys", "\n# The following ssh key was injected by Nova" "\nssh-rsa asdf\n") self.mox.ReplayAll() agent.inject_ssh_key() def _test_inject_ssh_key_skipped(self, instance): agent = self._create_agent(instance) # make sure its not 
called self.mox.StubOutWithMock(agent, "inject_file") self.mox.ReplayAll() agent.inject_ssh_key() def test_inject_ssh_key_skipped_no_key_data(self): instance = _get_fake_instance() instance["key_data"] = None self._test_inject_ssh_key_skipped(instance) def test_inject_ssh_key_skipped_windows(self): instance = _get_fake_instance() instance["os_type"] = "windows" self._test_inject_ssh_key_skipped(instance) def test_inject_ssh_key_skipped_cloud_init_present(self): instance = _get_fake_instance( image_xenapi_skip_agent_inject_ssh="True") self._test_inject_ssh_key_skipped(instance) class FileInjectionTestCase(AgentTestCaseBase): def test_inject_file(self): instance = _get_fake_instance() agent = self._create_agent(instance) self.mox.StubOutWithMock(agent, "_call_agent") b64_path = base64.b64encode('path') b64_contents = base64.b64encode('contents') agent._call_agent('inject_file', {'b64_contents': b64_contents, 'b64_path': b64_path}) self.mox.ReplayAll() agent.inject_file("path", "contents") def test_inject_files(self): instance = _get_fake_instance() agent = self._create_agent(instance) self.mox.StubOutWithMock(agent, "inject_file") files = [("path1", "content1"), ("path2", "content2")] agent.inject_file(*files[0]) agent.inject_file(*files[1]) self.mox.ReplayAll() agent.inject_files(files) def test_inject_files_skipped_when_cloud_init_installed(self): instance = _get_fake_instance( image_xenapi_skip_agent_inject_files_at_boot="True") agent = self._create_agent(instance) self.mox.StubOutWithMock(agent, "inject_file") files = [("path1", "content1"), ("path2", "content2")] self.mox.ReplayAll() agent.inject_files(files) class FakeRebootException(Exception): details = ["", "", "", "asdf REBOOT: asdf"] class RebootRetryTestCase(AgentTestCaseBase): @mock.patch.object(agent, '_wait_for_new_dom_id') def test_retry_on_reboot(self, mock_wait): mock_session = mock.Mock() def fake_call_plugin(*args, **kwargs): if fake_call_plugin.called: return {"returncode": '0', "message": "done"} else: fake_call_plugin.called = True raise FakeRebootException() fake_call_plugin.called = False mock_session.XenAPI.Failure = FakeRebootException mock_session.VM.get_domid.return_value = "fake_dom_id" mock_session.call_plugin.side_effect = fake_call_plugin agent = self._create_agent(None, mock_session) result = agent._call_agent("asdf") self.assertEqual("done", result) self.assertTrue(mock_session.VM.get_domid.called) self.assertEqual(2, mock_session.call_plugin.call_count) mock_wait.assert_called_once_with(mock_session, self.vm_ref, "fake_dom_id", "asdf") @mock.patch.object(time, 'sleep') @mock.patch.object(time, 'time') def test_wait_for_new_dom_id_found(self, mock_time, mock_sleep): mock_session = mock.Mock() mock_session.VM.get_domid.return_value = "new" agent._wait_for_new_dom_id(mock_session, "vm_ref", "old", "method") mock_session.VM.get_domid.assert_called_once_with("vm_ref") self.assertFalse(mock_sleep.called) @mock.patch.object(time, 'sleep') @mock.patch.object(time, 'time') def test_wait_for_new_dom_id_after_retry(self, mock_time, mock_sleep): self.flags(agent_timeout=3, group="xenserver") mock_time.return_value = 0 mock_session = mock.Mock() old = 40 new = 42 mock_session.VM.get_domid.side_effect = [old, -1, new] agent._wait_for_new_dom_id(mock_session, "vm_ref", old, "method") mock_session.VM.get_domid.assert_called_with("vm_ref") self.assertEqual(3, mock_session.VM.get_domid.call_count) self.assertEqual(2, mock_sleep.call_count) @mock.patch.object(time, 'sleep') @mock.patch.object(time, 'time') def 
test_wait_for_new_dom_id_timeout(self, mock_time, mock_sleep): self.flags(agent_timeout=3, group="xenserver") def fake_time(): fake_time.time = fake_time.time + 1 return fake_time.time fake_time.time = 0 mock_time.side_effect = fake_time mock_session = mock.Mock() mock_session.VM.get_domid.return_value = "old" self.assertRaises(exception.AgentTimeout, agent._wait_for_new_dom_id, mock_session, "vm_ref", "old", "method") self.assertEqual(4, mock_session.VM.get_domid.call_count) class SetAdminPasswordTestCase(AgentTestCaseBase): @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent') @mock.patch("nova.virt.xenapi.agent.SimpleDH") def test_exchange_key_with_agent(self, mock_simple_dh, mock_call_agent): agent = self._create_agent(None) instance_mock = mock_simple_dh() instance_mock.get_public.return_value = 4321 mock_call_agent.return_value = "1234" result = agent._exchange_key_with_agent() mock_call_agent.assert_called_once_with('key_init', {"pub": "4321"}, success_codes=['D0'], ignore_errors=False) result.compute_shared.assert_called_once_with(1234) @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent') @mock.patch.object(agent.XenAPIBasedAgent, '_save_instance_password_if_sshkey_present') @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent') def test_set_admin_password_works(self, mock_exchange, mock_save, mock_call_agent): mock_dh = mock.Mock(spec_set=agent.SimpleDH) mock_dh.encrypt.return_value = "enc_pass" mock_exchange.return_value = mock_dh agent_inst = self._create_agent(None) agent_inst.set_admin_password("new_pass") mock_dh.encrypt.assert_called_once_with("new_pass\n") mock_call_agent.assert_called_once_with('password', {'enc_pass': 'enc_pass'}) mock_save.assert_called_once_with("new_pass") @mock.patch.object(agent.XenAPIBasedAgent, '_add_instance_fault') @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent') def test_set_admin_password_silently_fails(self, mock_exchange, mock_add_fault): error = exception.AgentTimeout(method="fake") mock_exchange.side_effect = error agent_inst = self._create_agent(None) agent_inst.set_admin_password("new_pass") mock_add_fault.assert_called_once_with(error, mock.ANY) class UpgradeRequiredTestCase(test.NoDBTestCase): def test_less_than(self): self.assertTrue(agent.is_upgrade_required('1.2.3.4', '1.2.3.5')) def test_greater_than(self): self.assertFalse(agent.is_upgrade_required('1.2.3.5', '1.2.3.4')) def test_equal(self): self.assertFalse(agent.is_upgrade_required('1.2.3.4', '1.2.3.4')) def test_non_lexical(self): self.assertFalse(agent.is_upgrade_required('1.2.3.10', '1.2.3.4')) def test_length(self): self.assertTrue(agent.is_upgrade_required('1.2.3', '1.2.3.4')) @mock.patch.object(uuid, "uuid4") class CallAgentTestCase(AgentTestCaseBase): def test_call_agent_success(self, mock_uuid): session = mock.Mock() instance = {"uuid": "fake"} addl_args = {"foo": "bar"} session.VM.get_domid.return_value = '42' mock_uuid.return_value = 1 session.call_plugin.return_value = {'returncode': '4', 'message': "asdf\\r\\n"} self.assertEqual("asdf", agent._call_agent(session, instance, "vm_ref", "method", addl_args, timeout=300, success_codes=['0', '4'])) expected_args = { 'id': '1', 'dom_id': '42', 'timeout': '300', } expected_args.update(addl_args) session.VM.get_domid.assert_called_once_with("vm_ref") session.call_plugin.assert_called_once_with("agent", "method", expected_args) def _call_agent_setup(self, session, mock_uuid, returncode='0', success_codes=None, exception=None): session.XenAPI.Failure = xenapi_fake.Failure instance 
= {"uuid": "fake"} session.VM.get_domid.return_value = 42 mock_uuid.return_value = 1 if exception: session.call_plugin.side_effect = exception else: session.call_plugin.return_value = {'returncode': returncode, 'message': "asdf\\r\\n"} return agent._call_agent(session, instance, "vm_ref", "method", success_codes=success_codes) def _assert_agent_called(self, session, mock_uuid): expected_args = { 'id': '1', 'dom_id': '42', 'timeout': '30', } session.call_plugin.assert_called_once_with("agent", "method", expected_args) session.VM.get_domid.assert_called_once_with("vm_ref") def test_call_agent_works_with_defaults(self, mock_uuid): session = mock.Mock() self._call_agent_setup(session, mock_uuid) self._assert_agent_called(session, mock_uuid) def test_call_agent_fails_with_timeout(self, mock_uuid): session = mock.Mock() self.assertRaises(exception.AgentTimeout, self._call_agent_setup, session, mock_uuid, exception=xenapi_fake.Failure(["TIMEOUT:fake"])) self._assert_agent_called(session, mock_uuid) def test_call_agent_fails_with_not_implemented(self, mock_uuid): session = mock.Mock() self.assertRaises(exception.AgentNotImplemented, self._call_agent_setup, session, mock_uuid, exception=xenapi_fake.Failure(["NOT IMPLEMENTED:"])) self._assert_agent_called(session, mock_uuid) def test_call_agent_fails_with_other_error(self, mock_uuid): session = mock.Mock() self.assertRaises(exception.AgentError, self._call_agent_setup, session, mock_uuid, exception=xenapi_fake.Failure(["asdf"])) self._assert_agent_called(session, mock_uuid) def test_call_agent_fails_with_returned_error(self, mock_uuid): session = mock.Mock() self.assertRaises(exception.AgentError, self._call_agent_setup, session, mock_uuid, returncode='42') self._assert_agent_called(session, mock_uuid) class XenAPIBasedAgent(AgentTestCaseBase): @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault") @mock.patch.object(agent, "_call_agent") def test_call_agent_swallows_error(self, mock_call_agent, mock_add_instance_fault): fake_error = exception.AgentError(method="bob") mock_call_agent.side_effect = fake_error instance = _get_fake_instance() agent = self._create_agent(instance) agent._call_agent("bob") mock_call_agent.assert_called_once_with(agent.session, agent.instance, agent.vm_ref, "bob", None, None, None) mock_add_instance_fault.assert_called_once_with(fake_error, mock.ANY) @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault") @mock.patch.object(agent, "_call_agent") def test_call_agent_throws_error(self, mock_call_agent, mock_add_instance_fault): fake_error = exception.AgentError(method="bob") mock_call_agent.side_effect = fake_error instance = _get_fake_instance() agent = self._create_agent(instance) self.assertRaises(exception.AgentError, agent._call_agent, "bob", ignore_errors=False) mock_call_agent.assert_called_once_with(agent.session, agent.instance, agent.vm_ref, "bob", None, None, None) self.assertFalse(mock_add_instance_fault.called) nova-13.1.4/nova/tests/unit/virt/xenapi/image/0000775000567000056710000000000013064447471022334 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/image/test_glance.py0000664000567000056710000002647513064447152025210 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import time import mock from mox3 import mox from oslo_log import log as logging from nova.compute import utils as compute_utils from nova import context from nova import exception from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import driver as xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi.image import glance from nova.virt.xenapi import vm_utils class TestGlanceStore(stubs.XenAPITestBaseNoDB): def setUp(self): super(TestGlanceStore, self).setUp() self.store = glance.GlanceStore() self.flags(host='1.1.1.1', port=123, api_insecure=False, group='glance') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') self.context = context.RequestContext( 'user', 'project', auth_token='foobar') fake.reset() stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) self.session = driver._session self.stubs.Set( vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path') self.instance = {'uuid': 'blah', 'system_metadata': [], 'auto_disk_config': True, 'os_type': 'default', 'xenapi_use_agent': 'true'} def _get_params(self): return {'image_id': 'fake_image_uuid', 'endpoint': 'http://1.1.1.1:123', 'sr_path': '/fake/sr/path', 'extra_headers': {'X-Auth-Token': 'foobar', 'X-Roles': '', 'X-Tenant-Id': 'project', 'X-User-Id': 'user', 'X-Identity-Status': 'Confirmed'}} def _get_download_params(self): params = self._get_params() params['uuid_stack'] = ['uuid1'] return params def test_download_image(self): params = self._get_download_params() self.stubs.Set(vm_utils, '_make_uuid_stack', lambda *a, **kw: ['uuid1']) self.mox.StubOutWithMock(self.session, 'call_plugin_serialized') self.session.call_plugin_serialized('glance', 'download_vhd2', **params) self.mox.ReplayAll() self.store.download_image(self.context, self.session, self.instance, 'fake_image_uuid') self.mox.VerifyAll() @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1']) @mock.patch.object(random, 'shuffle') @mock.patch.object(time, 'sleep') @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'), 'debug') def test_download_image_retry(self, mock_log_debug, mock_fault, mock_sleep, mock_shuffle, mock_make_uuid_stack): params = self._get_download_params() self.flags(num_retries=2, group='glance') params.pop("endpoint") calls = [mock.call('glance', 'download_vhd2', endpoint='http://10.0.1.1:9292', **params), mock.call('glance', 'download_vhd2', endpoint='http://10.0.0.1:9293', **params)] log_calls = [mock.call(mock.ANY, {'callback_result': 'http://10.0.1.1:9292', 'attempts': 3, 'attempt': 1, 'fn': 'download_vhd2', 'plugin': 'glance'}), mock.call(mock.ANY, {'callback_result': 'http://10.0.0.1:9293', 'attempts': 3, 'attempt': 2, 'fn': 'download_vhd2', 'plugin': 'glance'})] glance_api_servers = ['10.0.1.1:9292', 'http://10.0.0.1:9293'] self.flags(api_servers=glance_api_servers, group='glance') with (mock.patch.object(self.session, 'call_plugin_serialized') ) as mock_call_plugin_serialized: error_details = ["", "", "RetryableError", ""] 
error = self.session.XenAPI.Failure(details=error_details) mock_call_plugin_serialized.side_effect = [error, "success"] self.store.download_image(self.context, self.session, self.instance, 'fake_image_uuid') mock_call_plugin_serialized.assert_has_calls(calls) mock_log_debug.assert_has_calls(log_calls, any_order=True) self.assertEqual(1, mock_fault.call_count) def _get_upload_params(self, auto_disk_config=True, expected_os_type='default'): params = self._get_params() params['vdi_uuids'] = ['fake_vdi_uuid'] params['properties'] = {'auto_disk_config': auto_disk_config, 'os_type': expected_os_type} return params def _test_upload_image(self, auto_disk_config, expected_os_type='default'): params = self._get_upload_params(auto_disk_config, expected_os_type) self.mox.StubOutWithMock(self.session, 'call_plugin_serialized') self.session.call_plugin_serialized('glance', 'upload_vhd2', **params) self.mox.ReplayAll() self.store.upload_image(self.context, self.session, self.instance, 'fake_image_uuid', ['fake_vdi_uuid']) self.mox.VerifyAll() def test_upload_image(self): self._test_upload_image(True) def test_upload_image_None_os_type(self): self.instance['os_type'] = None self._test_upload_image(True, 'linux') def test_upload_image_no_os_type(self): del self.instance['os_type'] self._test_upload_image(True, 'linux') def test_upload_image_auto_config_disk_disabled(self): sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}] self.instance["system_metadata"] = sys_meta self._test_upload_image("disabled") def test_upload_image_raises_exception(self): params = self._get_upload_params() self.mox.StubOutWithMock(self.session, 'call_plugin_serialized') self.session.call_plugin_serialized('glance', 'upload_vhd2', **params).AndRaise(RuntimeError) self.mox.ReplayAll() self.assertRaises(RuntimeError, self.store.upload_image, self.context, self.session, self.instance, 'fake_image_uuid', ['fake_vdi_uuid']) self.mox.VerifyAll() def test_upload_image_retries_then_raises_exception(self): self.flags(num_retries=2, group='glance') params = self._get_upload_params() self.mox.StubOutWithMock(self.session, 'call_plugin_serialized') self.mox.StubOutWithMock(time, 'sleep') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') error_details = ["", "", "RetryableError", ""] error = self.session.XenAPI.Failure(details=error_details) self.session.call_plugin_serialized('glance', 'upload_vhd2', **params).AndRaise(error) compute_utils.add_instance_fault_from_exc(self.context, self.instance, error, (fake.Failure, error, mox.IgnoreArg())) time.sleep(0.5) self.session.call_plugin_serialized('glance', 'upload_vhd2', **params).AndRaise(error) compute_utils.add_instance_fault_from_exc(self.context, self.instance, error, (fake.Failure, error, mox.IgnoreArg())) time.sleep(1) self.session.call_plugin_serialized('glance', 'upload_vhd2', **params).AndRaise(error) compute_utils.add_instance_fault_from_exc(self.context, self.instance, error, (fake.Failure, error, mox.IgnoreArg())) self.mox.ReplayAll() self.assertRaises(exception.CouldNotUploadImage, self.store.upload_image, self.context, self.session, self.instance, 'fake_image_uuid', ['fake_vdi_uuid']) self.mox.VerifyAll() def test_upload_image_retries_on_signal_exception(self): self.flags(num_retries=2, group='glance') params = self._get_upload_params() self.mox.StubOutWithMock(self.session, 'call_plugin_serialized') self.mox.StubOutWithMock(time, 'sleep') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') error_details = ["", "task signaled", 
"", ""] error = self.session.XenAPI.Failure(details=error_details) self.session.call_plugin_serialized('glance', 'upload_vhd2', **params).AndRaise(error) compute_utils.add_instance_fault_from_exc(self.context, self.instance, error, (fake.Failure, error, mox.IgnoreArg())) time.sleep(0.5) # Note(johngarbutt) XenServer 6.1 and later has this error error_details = ["", "signal: SIGTERM", "", ""] error = self.session.XenAPI.Failure(details=error_details) self.session.call_plugin_serialized('glance', 'upload_vhd2', **params).AndRaise(error) compute_utils.add_instance_fault_from_exc(self.context, self.instance, error, (fake.Failure, error, mox.IgnoreArg())) time.sleep(1) self.session.call_plugin_serialized('glance', 'upload_vhd2', **params) self.mox.ReplayAll() self.store.upload_image(self.context, self.session, self.instance, 'fake_image_uuid', ['fake_vdi_uuid']) self.mox.VerifyAll() nova-13.1.4/nova/tests/unit/virt/xenapi/image/__init__.py0000664000567000056710000000000013064447141024425 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/image/test_bittorrent.py0000664000567000056710000001153713064447152026144 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from mox3 import mox import six from nova import context from nova import test from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import driver as xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi.image import bittorrent from nova.virt.xenapi import vm_utils class TestBittorrentStore(stubs.XenAPITestBaseNoDB): def setUp(self): super(TestBittorrentStore, self).setUp() self.store = bittorrent.BittorrentStore() self.mox = mox.Mox() self.flags(torrent_base_url='http://foo', connection_url='test_url', connection_password='test_pass', group='xenserver') self.context = context.RequestContext( 'user', 'project', auth_token='foobar') fake.reset() stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) self.session = driver._session self.stubs.Set( vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path') def test_download_image(self): instance = {'uuid': '00000000-0000-0000-0000-000000007357'} params = {'image_id': 'fake_image_uuid', 'sr_path': '/fake/sr/path', 'torrent_download_stall_cutoff': 600, 'torrent_listen_port_end': 6891, 'torrent_listen_port_start': 6881, 'torrent_max_last_accessed': 86400, 'torrent_max_seeder_processes_per_host': 1, 'torrent_seed_chance': 1.0, 'torrent_seed_duration': 3600, 'torrent_url': 'http://foo/fake_image_uuid.torrent', 'uuid_stack': ['uuid1']} self.stubs.Set(vm_utils, '_make_uuid_stack', lambda *a, **kw: ['uuid1']) self.mox.StubOutWithMock(self.session, 'call_plugin_serialized') self.session.call_plugin_serialized( 'bittorrent', 'download_vhd', **params) self.mox.ReplayAll() self.store.download_image(self.context, self.session, instance, 'fake_image_uuid') self.mox.VerifyAll() def test_upload_image(self): self.assertRaises(NotImplementedError, self.store.upload_image, self.context, self.session, mox.IgnoreArg, 'fake_image_uuid', ['fake_vdi_uuid']) class LookupTorrentURLTestCase(test.NoDBTestCase): def setUp(self): super(LookupTorrentURLTestCase, self).setUp() self.store = bittorrent.BittorrentStore() self.image_id = 'fakeimageid' def test_default_fetch_url_no_base_url_set(self): self.flags(torrent_base_url=None, group='xenserver') exc = self.assertRaises( RuntimeError, self.store._lookup_torrent_url_fn) self.assertEqual('Cannot create default bittorrent URL without' ' xenserver.torrent_base_url configuration option' ' set.', six.text_type(exc)) def test_default_fetch_url_base_url_is_set(self): self.flags(torrent_base_url='http://foo', group='xenserver') lookup_fn = self.store._lookup_torrent_url_fn() self.assertEqual('http://foo/fakeimageid.torrent', lookup_fn(self.image_id)) def test_invalid_base_url_warning_logged(self): self.flags(torrent_base_url='www.foo.com', group='xenserver') # Make sure a warning is logged when an invalid base URL is set, # where invalid means it does not contain any slash characters warnings = [] def fake_warn(msg): warnings.append(msg) self.stubs.Set(bittorrent.LOG, 'warn', fake_warn) lookup_fn = self.store._lookup_torrent_url_fn() self.assertEqual('fakeimageid.torrent', lookup_fn(self.image_id)) self.assertTrue(any('does not contain a slash character' in msg for msg in warnings), '_lookup_torrent_url_fn() did not log a warning ' 'message when the torrent_base_url did not contain a ' 'slash character.') nova-13.1.4/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py0000664000567000056710000001542313064447152027126 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import tarfile import eventlet import six from nova.image import glance from nova import test from nova.virt.xenapi.client import session as xenapi_session from nova.virt.xenapi.image import vdi_through_dev @contextlib.contextmanager def fake_context(result=None): yield result class TestDelegatingToCommand(test.NoDBTestCase): def test_upload_image_is_delegated_to_command(self): command = self.mox.CreateMock(vdi_through_dev.UploadToGlanceAsRawTgz) self.mox.StubOutWithMock(vdi_through_dev, 'UploadToGlanceAsRawTgz') vdi_through_dev.UploadToGlanceAsRawTgz( 'ctx', 'session', 'instance', 'image_id', 'vdis').AndReturn( command) command.upload_image().AndReturn('result') self.mox.ReplayAll() store = vdi_through_dev.VdiThroughDevStore() result = store.upload_image( 'ctx', 'session', 'instance', 'image_id', 'vdis') self.assertEqual('result', result) class TestUploadToGlanceAsRawTgz(test.NoDBTestCase): def test_upload_image(self): store = vdi_through_dev.UploadToGlanceAsRawTgz( 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1']) self.mox.StubOutWithMock(store, '_perform_upload') self.mox.StubOutWithMock(store, '_get_vdi_ref') self.mox.StubOutWithMock(vdi_through_dev, 'glance') self.mox.StubOutWithMock(vdi_through_dev, 'vm_utils') self.mox.StubOutWithMock(vdi_through_dev, 'utils') store._get_vdi_ref().AndReturn('vdi_ref') vdi_through_dev.vm_utils.vdi_attached_here( 'session', 'vdi_ref', read_only=True).AndReturn( fake_context('dev')) vdi_through_dev.utils.make_dev_path('dev').AndReturn('devpath') vdi_through_dev.utils.temporary_chown('devpath').AndReturn( fake_context()) store._perform_upload('devpath') self.mox.ReplayAll() store.upload_image() def test__perform_upload(self): producer = self.mox.CreateMock(vdi_through_dev.TarGzProducer) consumer = self.mox.CreateMock(glance.UpdateGlanceImage) pool = self.mox.CreateMock(eventlet.GreenPool) store = vdi_through_dev.UploadToGlanceAsRawTgz( 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1']) self.mox.StubOutWithMock(store, '_create_pipe') self.mox.StubOutWithMock(store, '_get_virtual_size') self.mox.StubOutWithMock(producer, 'get_metadata') self.mox.StubOutWithMock(vdi_through_dev, 'TarGzProducer') self.mox.StubOutWithMock(glance, 'UpdateGlanceImage') self.mox.StubOutWithMock(vdi_through_dev, 'eventlet') producer.get_metadata().AndReturn('metadata') store._get_virtual_size().AndReturn('324') store._create_pipe().AndReturn(('readfile', 'writefile')) vdi_through_dev.TarGzProducer( 'devpath', 'writefile', '324', 'disk.raw').AndReturn( producer) glance.UpdateGlanceImage('context', 'id', 'metadata', 'readfile').AndReturn(consumer) vdi_through_dev.eventlet.GreenPool().AndReturn(pool) pool.spawn(producer.start) pool.spawn(consumer.start) pool.waitall() self.mox.ReplayAll() store._perform_upload('devpath') def test__get_vdi_ref(self): session = self.mox.CreateMock(xenapi_session.XenAPISession) store = vdi_through_dev.UploadToGlanceAsRawTgz( 'context', session, 'instance', 'id', ['vdi0', 'vdi1']) 
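# _get_vdi_ref() is expected to resolve the first VDI uuid in the list
# ('vdi0') to a VDI ref via the VDI.get_by_uuid XenAPI call recorded below.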
session.call_xenapi('VDI.get_by_uuid', 'vdi0').AndReturn('vdi_ref') self.mox.ReplayAll() self.assertEqual('vdi_ref', store._get_vdi_ref()) def test__get_virtual_size(self): session = self.mox.CreateMock(xenapi_session.XenAPISession) store = vdi_through_dev.UploadToGlanceAsRawTgz( 'context', session, 'instance', 'id', ['vdi0', 'vdi1']) self.mox.StubOutWithMock(store, '_get_vdi_ref') store._get_vdi_ref().AndReturn('vdi_ref') session.call_xenapi('VDI.get_virtual_size', 'vdi_ref') self.mox.ReplayAll() store._get_virtual_size() def test__create_pipe(self): store = vdi_through_dev.UploadToGlanceAsRawTgz( 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1']) self.mox.StubOutWithMock(vdi_through_dev, 'os') self.mox.StubOutWithMock(vdi_through_dev, 'greenio') vdi_through_dev.os.pipe().AndReturn(('rpipe', 'wpipe')) vdi_through_dev.greenio.GreenPipe('rpipe', 'rb', 0).AndReturn('rfile') vdi_through_dev.greenio.GreenPipe('wpipe', 'wb', 0).AndReturn('wfile') self.mox.ReplayAll() result = store._create_pipe() self.assertEqual(('rfile', 'wfile'), result) class TestTarGzProducer(test.NoDBTestCase): def test_constructor(self): producer = vdi_through_dev.TarGzProducer('devpath', 'writefile', '100', 'fname') self.assertEqual('devpath', producer.fpath) self.assertEqual('writefile', producer.output) self.assertEqual('100', producer.size) self.assertEqual('writefile', producer.output) def test_start(self): outf = six.StringIO() producer = vdi_through_dev.TarGzProducer('fpath', outf, '100', 'fname') tfile = self.mox.CreateMock(tarfile.TarFile) tinfo = self.mox.CreateMock(tarfile.TarInfo) inf = self.mox.CreateMock(open) self.mox.StubOutWithMock(vdi_through_dev, 'tarfile') self.mox.StubOutWithMock(producer, '_open_file') vdi_through_dev.tarfile.TarInfo(name='fname').AndReturn(tinfo) vdi_through_dev.tarfile.open(fileobj=outf, mode='w|gz').AndReturn( fake_context(tfile)) producer._open_file('fpath', 'rb').AndReturn(fake_context(inf)) tfile.addfile(tinfo, fileobj=inf) outf.close() self.mox.ReplayAll() producer.start() self.assertEqual(100, tinfo.size) def test_get_metadata(self): producer = vdi_through_dev.TarGzProducer('devpath', 'writefile', '100', 'fname') self.assertEqual({ 'disk_format': 'raw', 'container_format': 'tgz'}, producer.get_metadata()) nova-13.1.4/nova/tests/unit/virt/xenapi/image/test_utils.py0000664000567000056710000001767013064447141025112 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
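# These tests exercise the image helper wrappers in
# nova.virt.xenapi.image.utils: GlanceImage (metadata and download through
# the image API), RawImage and RawTGZImage (size lookup and streaming to a
# file), and IterableToFileAdapter (a file-like view over a chunk iterable).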
import tarfile import mock from nova import test from nova.virt.xenapi.image import utils @mock.patch.object(utils, 'IMAGE_API') class GlanceImageTestCase(test.NoDBTestCase): def _get_image(self): return utils.GlanceImage(mock.sentinel.context, mock.sentinel.image_ref) def test_meta(self, mocked): mocked.get.return_value = mock.sentinel.meta image = self._get_image() self.assertEqual(mock.sentinel.meta, image.meta) mocked.get.assert_called_once_with(mock.sentinel.context, mock.sentinel.image_ref) def test_download_to(self, mocked): mocked.download.return_value = None image = self._get_image() result = image.download_to(mock.sentinel.fobj) self.assertIsNone(result) mocked.download.assert_called_once_with(mock.sentinel.context, mock.sentinel.image_ref, mock.sentinel.fobj) def test_is_raw_tgz_empty_meta(self, mocked): mocked.get.return_value = {} image = self._get_image() self.assertFalse(image.is_raw_tgz()) def test_is_raw_tgz_for_raw_tgz(self, mocked): mocked.get.return_value = { 'disk_format': 'raw', 'container_format': 'tgz' } image = self._get_image() self.assertTrue(image.is_raw_tgz()) def test_data(self, mocked): mocked.download.return_value = mock.sentinel.image image = self._get_image() self.assertEqual(mock.sentinel.image, image.data()) class RawImageTestCase(test.NoDBTestCase): def test_get_size(self): glance_image = self.mox.CreateMock(utils.GlanceImage) glance_image.meta = {'size': '123'} raw_image = utils.RawImage(glance_image) self.mox.ReplayAll() self.assertEqual(123, raw_image.get_size()) def test_stream_to(self): glance_image = self.mox.CreateMock(utils.GlanceImage) glance_image.download_to('file').AndReturn('result') raw_image = utils.RawImage(glance_image) self.mox.ReplayAll() self.assertEqual('result', raw_image.stream_to('file')) class TestIterableBasedFile(test.NoDBTestCase): def test_constructor(self): class FakeIterable(object): def __iter__(_self): return 'iterator' the_file = utils.IterableToFileAdapter(FakeIterable()) self.assertEqual('iterator', the_file.iterator) def test_read_one_character(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) self.assertEqual('c', the_file.read(1)) def test_read_stores_remaining_characters(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) the_file.read(1) self.assertEqual('hunk1', the_file.remaining_data) def test_read_remaining_characters(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) self.assertEqual('c', the_file.read(1)) self.assertEqual('h', the_file.read(1)) def test_read_reached_end_of_file(self): the_file = utils.IterableToFileAdapter([ 'chunk1', 'chunk2' ]) self.assertEqual('chunk1', the_file.read(100)) self.assertEqual('chunk2', the_file.read(100)) self.assertEqual('', the_file.read(100)) def test_empty_chunks(self): the_file = utils.IterableToFileAdapter([ '', '', 'chunk2' ]) self.assertEqual('chunk2', the_file.read(100)) class RawTGZTestCase(test.NoDBTestCase): def test_as_tarfile(self): image = utils.RawTGZImage(None) self.mox.StubOutWithMock(image, '_as_file') self.mox.StubOutWithMock(utils.tarfile, 'open') image._as_file().AndReturn('the_file') utils.tarfile.open(mode='r|gz', fileobj='the_file').AndReturn('tf') self.mox.ReplayAll() result = image._as_tarfile() self.assertEqual('tf', result) def test_as_file(self): self.mox.StubOutWithMock(utils, 'IterableToFileAdapter') glance_image = self.mox.CreateMock(utils.GlanceImage) image = utils.RawTGZImage(glance_image) glance_image.data().AndReturn('iterable-data') 
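# _as_file() should wrap the raw Glance data iterable in an
# IterableToFileAdapter so the tar stream can be read like a file object.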
utils.IterableToFileAdapter('iterable-data').AndReturn('data-as-file') self.mox.ReplayAll() result = image._as_file() self.assertEqual('data-as-file', result) def test_get_size(self): tar_file = self.mox.CreateMock(tarfile.TarFile) tar_info = self.mox.CreateMock(tarfile.TarInfo) image = utils.RawTGZImage(None) self.mox.StubOutWithMock(image, '_as_tarfile') image._as_tarfile().AndReturn(tar_file) tar_file.next().AndReturn(tar_info) tar_info.size = 124 self.mox.ReplayAll() result = image.get_size() self.assertEqual(124, result) self.assertEqual(image._tar_info, tar_info) self.assertEqual(image._tar_file, tar_file) def test_get_size_called_twice(self): tar_file = self.mox.CreateMock(tarfile.TarFile) tar_info = self.mox.CreateMock(tarfile.TarInfo) image = utils.RawTGZImage(None) self.mox.StubOutWithMock(image, '_as_tarfile') image._as_tarfile().AndReturn(tar_file) tar_file.next().AndReturn(tar_info) tar_info.size = 124 self.mox.ReplayAll() image.get_size() result = image.get_size() self.assertEqual(124, result) self.assertEqual(image._tar_info, tar_info) self.assertEqual(image._tar_file, tar_file) def test_stream_to_without_size_retrieved(self): source_tar = self.mox.CreateMock(tarfile.TarFile) first_tarinfo = self.mox.CreateMock(tarfile.TarInfo) target_file = self.mox.CreateMock(open) source_file = self.mox.CreateMock(open) image = utils.RawTGZImage(None) image._image_service_and_image_id = ('service', 'id') self.mox.StubOutWithMock(image, '_as_tarfile', source_tar) self.mox.StubOutWithMock(utils.shutil, 'copyfileobj') image._as_tarfile().AndReturn(source_tar) source_tar.next().AndReturn(first_tarinfo) source_tar.extractfile(first_tarinfo).AndReturn(source_file) utils.shutil.copyfileobj(source_file, target_file) source_tar.close() self.mox.ReplayAll() image.stream_to(target_file) def test_stream_to_with_size_retrieved(self): source_tar = self.mox.CreateMock(tarfile.TarFile) first_tarinfo = self.mox.CreateMock(tarfile.TarInfo) target_file = self.mox.CreateMock(open) source_file = self.mox.CreateMock(open) first_tarinfo.size = 124 image = utils.RawTGZImage(None) image._image_service_and_image_id = ('service', 'id') self.mox.StubOutWithMock(image, '_as_tarfile', source_tar) self.mox.StubOutWithMock(utils.shutil, 'copyfileobj') image._as_tarfile().AndReturn(source_tar) source_tar.next().AndReturn(first_tarinfo) source_tar.extractfile(first_tarinfo).AndReturn(source_file) utils.shutil.copyfileobj(source_file, target_file) source_tar.close() self.mox.ReplayAll() image.get_size() image.stream_to(target_file) nova-13.1.4/nova/tests/unit/virt/xenapi/stubs.py0000664000567000056710000003242413064447152022765 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
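# The helpers below replace the XenAPI plumbing used by the driver tests:
# stubout_session() swaps in a fake session class, the other stubout_*()
# functions make individual vm_utils/vmops calls succeed or fail
# deterministically, and the FakeSessionFor*Tests classes emulate plugin
# calls (glance downloads, xenhost iptables_config) and VM lifecycle
# operations entirely in memory.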
"""Stubouts, mocks and fixtures for the test suite.""" import pickle import random import sys import fixtures from oslo_serialization import jsonutils import six from nova import test import nova.tests.unit.image.fake from nova.virt.xenapi.client import session from nova.virt.xenapi import fake from nova.virt.xenapi import vm_utils from nova.virt.xenapi import vmops def stubout_firewall_driver(stubs, conn): def fake_none(self, *args): return _vmops = conn._vmops stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none) stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none) def stubout_instance_snapshot(stubs): def fake_fetch_image(context, session, instance, name_label, image, type): return {'root': dict(uuid=_make_fake_vdi(), file=None), 'kernel': dict(uuid=_make_fake_vdi(), file=None), 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)} stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) def fake_wait_for_vhd_coalesce(*args): # TODO(sirp): Should we actually fake out the data here return "fakeparent", "fakebase" stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce) def stubout_session(stubs, cls, product_version=(5, 6, 2), product_brand='XenServer', **opt_args): """Stubs out methods from XenAPISession.""" stubs.Set(session.XenAPISession, '_create_session', lambda s, url: cls(url, **opt_args)) stubs.Set(session.XenAPISession, '_get_product_version_and_brand', lambda s: (product_version, product_brand)) def stubout_get_this_vm_uuid(stubs): def f(session): vms = [rec['uuid'] for ref, rec in six.iteritems(fake.get_all_records('VM')) if rec['is_control_domain']] return vms[0] stubs.Set(vm_utils, 'get_this_vm_uuid', f) def stubout_image_service_download(stubs): def fake_download(*args, **kwargs): pass stubs.Set(nova.tests.unit.image.fake._FakeImageService, 'download', fake_download) def stubout_stream_disk(stubs): def fake_stream_disk(*args, **kwargs): pass stubs.Set(vm_utils, '_stream_disk', fake_stream_disk) def stubout_determine_is_pv_objectstore(stubs): """Assumes VMs stu have PV kernels.""" def f(*args): return False stubs.Set(vm_utils, '_determine_is_pv_objectstore', f) def stubout_is_snapshot(stubs): """Always returns true xenapi fake driver does not create vmrefs for snapshots. 
""" def f(*args): return True stubs.Set(vm_utils, 'is_snapshot', f) def stubout_lookup_image(stubs): """Simulates a failure in lookup image.""" def f(_1, _2, _3, _4): raise Exception("Test Exception raised by fake lookup_image") stubs.Set(vm_utils, 'lookup_image', f) def stubout_fetch_disk_image(stubs, raise_failure=False): """Simulates a failure in fetch image_glance_disk.""" def _fake_fetch_disk_image(context, session, instance, name_label, image, image_type): if raise_failure: raise fake.Failure("Test Exception raised by " "fake fetch_image_glance_disk") elif image_type == vm_utils.ImageType.KERNEL: filename = "kernel" elif image_type == vm_utils.ImageType.RAMDISK: filename = "ramdisk" else: filename = "unknown" vdi_type = vm_utils.ImageType.to_string(image_type) return {vdi_type: dict(uuid=None, file=filename)} stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image) def stubout_create_vm(stubs): """Simulates a failure in create_vm.""" def f(*args): raise fake.Failure("Test Exception raised by fake create_vm") stubs.Set(vm_utils, 'create_vm', f) def stubout_attach_disks(stubs): """Simulates a failure in _attach_disks.""" def f(*args): raise fake.Failure("Test Exception raised by fake _attach_disks") stubs.Set(vmops.VMOps, '_attach_disks', f) def _make_fake_vdi(): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', sr_ref) vdi_rec = fake.get_record('VDI', vdi_ref) return vdi_rec['uuid'] class FakeSessionForVMTests(fake.SessionBase): """Stubs out a XenAPISession for VM tests.""" _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on " "Sun Nov 6 22:49:02 2011\n" "*filter\n" ":INPUT ACCEPT [0:0]\n" ":FORWARD ACCEPT [0:0]\n" ":OUTPUT ACCEPT [0:0]\n" "COMMIT\n" "# Completed on Sun Nov 6 22:49:02 2011\n") def host_call_plugin(self, _1, _2, plugin, method, _5): if plugin == 'glance' and method in ('download_vhd', 'download_vhd2'): root_uuid = _make_fake_vdi() return pickle.dumps(dict(root=dict(uuid=root_uuid))) elif (plugin, method) == ("xenhost", "iptables_config"): return fake.as_json(out=self._fake_iptables_save_output, err='') else: return (super(FakeSessionForVMTests, self). 
host_call_plugin(_1, _2, plugin, method, _5)) def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) if vm['power_state'] != 'Halted': raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', vm['power_state']]) vm['power_state'] = 'Running' vm['is_a_template'] = False vm['is_control_domain'] = False vm['domid'] = random.randrange(1, 1 << 16) return vm def VM_start_on(self, _1, vm_ref, host_ref, _2, _3): vm_rec = self.VM_start(_1, vm_ref, _2, _3) vm_rec['resident_on'] = host_ref def VDI_snapshot(self, session_ref, vm_ref, _1): sr_ref = "fakesr" return fake.create_vdi('fakelabel', sr_ref, read_only=True) def SR_scan(self, session_ref, sr_ref): pass class FakeSessionForFirewallTests(FakeSessionForVMTests): """Stubs out a XenApi Session for doing IPTable Firewall tests.""" def __init__(self, uri, test_case=None): super(FakeSessionForFirewallTests, self).__init__(uri) if hasattr(test_case, '_in_rules'): self._in_rules = test_case._in_rules if hasattr(test_case, '_in6_filter_rules'): self._in6_filter_rules = test_case._in6_filter_rules self._test_case = test_case def host_call_plugin(self, _1, _2, plugin, method, args): """Mock method four host_call_plugin to be used in unit tests for the dom0 iptables Firewall drivers for XenAPI """ if plugin == "xenhost" and method == "iptables_config": # The command to execute is a json-encoded list cmd_args = args.get('cmd_args', None) cmd = jsonutils.loads(cmd_args) if not cmd: ret_str = '' else: output = '' process_input = args.get('process_input', None) if cmd == ['ip6tables-save', '-c']: output = '\n'.join(self._in6_filter_rules) if cmd == ['iptables-save', '-c']: output = '\n'.join(self._in_rules) if cmd == ['iptables-restore', '-c', ]: lines = process_input.split('\n') if '*filter' in lines: if self._test_case is not None: self._test_case._out_rules = lines output = '\n'.join(lines) if cmd == ['ip6tables-restore', '-c', ]: lines = process_input.split('\n') if '*filter' in lines: output = '\n'.join(lines) ret_str = fake.as_json(out=output, err='') return ret_str else: return (super(FakeSessionForVMTests, self). 
host_call_plugin(_1, _2, plugin, method, args)) def stub_out_vm_methods(stubs): def fake_acquire_bootlock(self, vm): pass def fake_release_bootlock(self, vm): pass def fake_generate_ephemeral(*args): pass def fake_wait_for_device(dev): pass stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock) stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock) stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device) class ReplaceModule(fixtures.Fixture): """Replace a module with a fake module.""" def __init__(self, name, new_value): self.name = name self.new_value = new_value def _restore(self, old_value): sys.modules[self.name] = old_value def setUp(self): super(ReplaceModule, self).setUp() old_value = sys.modules.get(self.name) sys.modules[self.name] = self.new_value self.addCleanup(self._restore, old_value) class FakeSessionForVolumeTests(fake.SessionBase): """Stubs out a XenAPISession for Volume tests.""" def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): valid_vdi = False refs = fake.get_all('VDI') for ref in refs: rec = fake.get_record('VDI', ref) if rec['uuid'] == uuid: valid_vdi = True if not valid_vdi: raise fake.Failure([['INVALID_VDI', 'session', self._session]]) class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): """Stubs out a XenAPISession for Volume tests: it injects failures.""" def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): # This is for testing failure raise fake.Failure([['INVALID_VDI', 'session', self._session]]) def PBD_unplug(self, _1, ref): rec = fake.get_record('PBD', ref) rec['currently-attached'] = False def SR_forget(self, _1, ref): pass def stub_out_migration_methods(stubs): fakesr = fake.create_sr() def fake_import_all_migrated_disks(session, instance, import_root=True): vdi_ref = fake.create_vdi(instance['name'], fakesr) vdi_rec = fake.get_record('VDI', vdi_ref) vdi_rec['other_config']['nova_disk_type'] = 'root' return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref}, "ephemerals": {}} def fake_wait_for_instance_to_start(self, *args): pass def fake_get_vdi(session, vm_ref, userdevice='0'): vdi_ref_parent = fake.create_vdi('derp-parent', fakesr) vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent) vdi_ref = fake.create_vdi('derp', fakesr, sm_config={'vhd-parent': vdi_rec_parent['uuid']}) vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) return vdi_ref, vdi_rec def fake_sr(session, *args): return fakesr def fake_get_sr_path(*args): return "fake" def fake_destroy(*args, **kwargs): pass def fake_generate_ephemeral(*args): pass stubs.Set(vmops.VMOps, '_destroy', fake_destroy) stubs.Set(vmops.VMOps, '_wait_for_instance_to_start', fake_wait_for_instance_to_start) stubs.Set(vm_utils, 'import_all_migrated_disks', fake_import_all_migrated_disks) stubs.Set(vm_utils, 'scan_default_sr', fake_sr) stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi) stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path) stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) class FakeSessionForFailedMigrateTests(FakeSessionForVMTests): def VM_assert_can_migrate(self, session, vmref, migrate_data, live, vdi_map, vif_map, options): raise fake.Failure("XenAPI VM.assert_can_migrate failed") def host_migrate_receive(self, session, hostref, networkref, options): raise fake.Failure("XenAPI host.migrate_receive failed") def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map, vif_map, options): raise 
fake.Failure("XenAPI VM.migrate_send failed") # FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted # over to use XenAPITestBaseNoDB class XenAPITestBase(test.TestCase): def setUp(self): super(XenAPITestBase, self).setUp() self.useFixture(ReplaceModule('XenAPI', fake)) fake.reset() class XenAPITestBaseNoDB(test.NoDBTestCase): def setUp(self): super(XenAPITestBaseNoDB, self).setUp() self.useFixture(ReplaceModule('XenAPI', fake)) fake.reset() nova-13.1.4/nova/tests/unit/virt/xenapi/test_network_utils.py0000664000567000056710000000544213064447141025573 0ustar jenkinsjenkins00000000000000 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova import exception from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import network_utils class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB): def test_find_network_with_name_label_works(self): session = mock.Mock() session.network.get_by_name_label.return_value = ["net"] result = network_utils.find_network_with_name_label(session, "label") self.assertEqual("net", result) session.network.get_by_name_label.assert_called_once_with("label") def test_find_network_with_name_returns_none(self): session = mock.Mock() session.network.get_by_name_label.return_value = [] result = network_utils.find_network_with_name_label(session, "label") self.assertIsNone(result) def test_find_network_with_name_label_raises(self): session = mock.Mock() session.network.get_by_name_label.return_value = ["net", "net2"] self.assertRaises(exception.NovaException, network_utils.find_network_with_name_label, session, "label") def test_find_network_with_bridge_works(self): session = mock.Mock() session.network.get_all_records_where.return_value = {"net": "asdf"} result = network_utils.find_network_with_bridge(session, "bridge") self.assertEqual(result, "net") expr = 'field "name__label" = "bridge" or field "bridge" = "bridge"' session.network.get_all_records_where.assert_called_once_with(expr) def test_find_network_with_bridge_raises_too_many(self): session = mock.Mock() session.network.get_all_records_where.return_value = { "net": "asdf", "net2": "asdf2" } self.assertRaises(exception.NovaException, network_utils.find_network_with_bridge, session, "bridge") def test_find_network_with_bridge_raises_no_networks(self): session = mock.Mock() session.network.get_all_records_where.return_value = {} self.assertRaises(exception.NovaException, network_utils.find_network_with_bridge, session, "bridge") nova-13.1.4/nova/tests/unit/virt/xenapi/test_vm_utils.py0000664000567000056710000030515313064447152024530 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import uuid from eventlet import greenthread import fixtures import mock from mox3 import mox from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils import six from nova.compute import flavors from nova.compute import power_state from nova.compute import vm_mode from nova import context from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance from nova.tests.unit.objects import test_flavor from nova.tests.unit.virt.xenapi import stubs from nova.tests.unit.virt.xenapi import test_xenapi from nova import utils from nova.virt import hardware from nova.virt.xenapi.client import session as xenapi_session from nova.virt.xenapi import driver as xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import vm_utils CONF = cfg.CONF XENSM_TYPE = 'xensm' ISCSI_TYPE = 'iscsi' def get_fake_connection_data(sr_type): fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR', 'name_label': 'fake_storage', 'name_description': 'test purposes', 'server': 'myserver', 'serverpath': '/local/scratch/myname', 'sr_type': 'nfs', 'introduce_sr_keys': ['server', 'serverpath', 'sr_type'], 'vdi_uuid': 'falseVDI'}, ISCSI_TYPE: {'volume_id': 'fake_volume_id', 'target_lun': 1, 'target_iqn': 'fake_iqn:volume-fake_volume_id', 'target_portal': u'localhost:3260', 'target_discovered': False}, } return fakes[sr_type] def _get_fake_session(error=None): session = mock.Mock() xenapi_session.apply_session_helpers(session) if error is not None: class FakeException(Exception): details = [error, "a", "b", "c"] session.XenAPI.Failure = FakeException session.call_xenapi.side_effect = FakeException return session @contextlib.contextmanager def contextified(result): yield result def _fake_noop(*args, **kwargs): return class VMUtilsTestBase(stubs.XenAPITestBaseNoDB): pass class LookupTestCase(VMUtilsTestBase): def setUp(self): super(LookupTestCase, self).setUp() self.session = self.mox.CreateMockAnything('Fake Session') self.name_label = 'my_vm' def _do_mock(self, result): self.session.call_xenapi( "VM.get_by_name_label", self.name_label).AndReturn(result) self.mox.ReplayAll() def test_normal(self): self._do_mock(['x']) result = vm_utils.lookup(self.session, self.name_label) self.assertEqual('x', result) def test_no_result(self): self._do_mock([]) result = vm_utils.lookup(self.session, self.name_label) self.assertIsNone(result) def test_too_many(self): self._do_mock(['a', 'b']) self.assertRaises(exception.InstanceExists, vm_utils.lookup, self.session, self.name_label) def test_rescue_none(self): self.session.call_xenapi( "VM.get_by_name_label", self.name_label + '-rescue').AndReturn([]) self._do_mock(['x']) result = vm_utils.lookup(self.session, self.name_label, check_rescue=True) self.assertEqual('x', result) def test_rescue_found(self): self.session.call_xenapi( 
"VM.get_by_name_label", self.name_label + '-rescue').AndReturn(['y']) self.mox.ReplayAll() result = vm_utils.lookup(self.session, self.name_label, check_rescue=True) self.assertEqual('y', result) def test_rescue_too_many(self): self.session.call_xenapi( "VM.get_by_name_label", self.name_label + '-rescue').AndReturn(['a', 'b', 'c']) self.mox.ReplayAll() self.assertRaises(exception.InstanceExists, vm_utils.lookup, self.session, self.name_label, check_rescue=True) class GenerateConfigDriveTestCase(VMUtilsTestBase): def test_no_admin_pass(self): instance = {} self.mox.StubOutWithMock(vm_utils, 'safe_find_sr') vm_utils.safe_find_sr('session').AndReturn('sr_ref') self.mox.StubOutWithMock(vm_utils, 'create_vdi') vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2', 'configdrive', 64 * units.Mi).AndReturn('vdi_ref') self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here') vm_utils.vdi_attached_here( 'session', 'vdi_ref', read_only=False).AndReturn( contextified('mounted_dev')) class FakeInstanceMetadata(object): def __init__(_self, instance, content=None, extra_md=None, network_info=None): self.assertEqual(network_info, "nw_info") def metadata_for_config_drive(_self): return [] self.useFixture(fixtures.MonkeyPatch( 'nova.api.metadata.base.InstanceMetadata', FakeInstanceMetadata)) self.mox.StubOutWithMock(utils, 'execute') utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots', '-allow-lowercase', '-allow-multidot', '-l', '-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r', '-V', 'config-2', mox.IgnoreArg(), attempts=1, run_as_root=False).AndReturn(None) utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True).AndReturn(None) self.mox.StubOutWithMock(vm_utils, 'create_vbd') vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(), bootable=False, read_only=True).AndReturn(None) self.mox.ReplayAll() # And the actual call we're testing vm_utils.generate_configdrive('session', instance, 'vm_ref', 'userdevice', "nw_info") @mock.patch.object(vm_utils, "destroy_vdi") @mock.patch.object(vm_utils, "vdi_attached_here") @mock.patch.object(vm_utils, "create_vdi") @mock.patch.object(vm_utils, "safe_find_sr") def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached, mock_destroy): mock_create_vdi.return_value = 'vdi_ref' mock_attached.side_effect = test.TestingException mock_destroy.side_effect = exception.StorageError(reason="") instance = {"uuid": "asdf"} self.assertRaises(test.TestingException, vm_utils.generate_configdrive, 'session', instance, 'vm_ref', 'userdevice', 'nw_info') mock_destroy.assert_called_once_with('session', 'vdi_ref') class XenAPIGetUUID(VMUtilsTestBase): def test_get_this_vm_uuid_new_kernel(self): self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid') vm_utils._get_sys_hypervisor_uuid().AndReturn( '2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f') self.mox.ReplayAll() self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', vm_utils.get_this_vm_uuid(None)) self.mox.VerifyAll() def test_get_this_vm_uuid_old_kernel_reboot(self): self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid') self.mox.StubOutWithMock(utils, 'execute') vm_utils._get_sys_hypervisor_uuid().AndRaise( IOError(13, 'Permission denied')) utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn( ('27', '')) utils.execute('xenstore-read', '/local/domain/27/vm', run_as_root=True).AndReturn( ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', '')) self.mox.ReplayAll() self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', 
vm_utils.get_this_vm_uuid(None)) self.mox.VerifyAll() class FakeSession(object): def call_xenapi(self, *args): pass def call_plugin(self, *args): pass def call_plugin_serialized(self, plugin, fn, *args, **kwargs): pass def call_plugin_serialized_with_retry(self, plugin, fn, num_retries, callback, *args, **kwargs): pass class FetchVhdImageTestCase(VMUtilsTestBase): def setUp(self): super(FetchVhdImageTestCase, self).setUp() self.context = context.get_admin_context() self.context.auth_token = 'auth_token' self.session = FakeSession() self.instance = {"uuid": "uuid"} self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack') vm_utils._make_uuid_stack().AndReturn(["uuid_stack"]) self.mox.StubOutWithMock(vm_utils, 'get_sr_path') vm_utils.get_sr_path(self.session).AndReturn('sr_path') def _stub_glance_download_vhd(self, raise_exc=None): self.mox.StubOutWithMock( self.session, 'call_plugin_serialized_with_retry') func = self.session.call_plugin_serialized_with_retry( 'glance', 'download_vhd2', 0, mox.IgnoreArg(), mox.IgnoreArg(), extra_headers={'X-Auth-Token': 'auth_token', 'X-Roles': '', 'X-Tenant-Id': None, 'X-User-Id': None, 'X-Identity-Status': 'Confirmed'}, image_id='image_id', uuid_stack=["uuid_stack"], sr_path='sr_path') if raise_exc: func.AndRaise(raise_exc) else: func.AndReturn({'root': {'uuid': 'vdi'}}) def _stub_bittorrent_download_vhd(self, raise_exc=None): self.mox.StubOutWithMock( self.session, 'call_plugin_serialized') func = self.session.call_plugin_serialized( 'bittorrent', 'download_vhd', image_id='image_id', uuid_stack=["uuid_stack"], sr_path='sr_path', torrent_download_stall_cutoff=600, torrent_listen_port_start=6881, torrent_listen_port_end=6891, torrent_max_last_accessed=86400, torrent_max_seeder_processes_per_host=1, torrent_seed_chance=1.0, torrent_seed_duration=3600, torrent_url='http://foo/image_id.torrent' ) if raise_exc: func.AndRaise(raise_exc) else: func.AndReturn({'root': {'uuid': 'vdi'}}) def test_fetch_vhd_image_works_with_glance(self): self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent') vm_utils._image_uses_bittorrent( self.context, self.instance).AndReturn(False) self._stub_glance_download_vhd() self.mox.StubOutWithMock(vm_utils, 'safe_find_sr') vm_utils.safe_find_sr(self.session).AndReturn("sr") self.mox.StubOutWithMock(vm_utils, '_scan_sr') vm_utils._scan_sr(self.session, "sr") self.mox.StubOutWithMock(vm_utils, '_check_vdi_size') vm_utils._check_vdi_size( self.context, self.session, self.instance, "vdi") self.mox.ReplayAll() self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context, self.session, self.instance, 'image_id')['root']['uuid']) self.mox.VerifyAll() def test_fetch_vhd_image_works_with_bittorrent(self): cfg.CONF.import_opt('torrent_base_url', 'nova.virt.xenapi.image.bittorrent', group='xenserver') self.flags(torrent_base_url='http://foo', group='xenserver') self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent') vm_utils._image_uses_bittorrent( self.context, self.instance).AndReturn(True) self._stub_bittorrent_download_vhd() self.mox.StubOutWithMock(vm_utils, 'safe_find_sr') vm_utils.safe_find_sr(self.session).AndReturn("sr") self.mox.StubOutWithMock(vm_utils, '_scan_sr') vm_utils._scan_sr(self.session, "sr") self.mox.StubOutWithMock(vm_utils, '_check_vdi_size') vm_utils._check_vdi_size(self.context, self.session, self.instance, "vdi") self.mox.ReplayAll() self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context, self.session, self.instance, 'image_id')['root']['uuid']) self.mox.VerifyAll() def 
test_fetch_vhd_image_cleans_up_vdi_on_fail(self): self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent') vm_utils._image_uses_bittorrent( self.context, self.instance).AndReturn(False) self._stub_glance_download_vhd() self.mox.StubOutWithMock(vm_utils, 'safe_find_sr') vm_utils.safe_find_sr(self.session).AndReturn("sr") self.mox.StubOutWithMock(vm_utils, '_scan_sr') vm_utils._scan_sr(self.session, "sr") self.mox.StubOutWithMock(vm_utils, '_check_vdi_size') vm_utils._check_vdi_size(self.context, self.session, self.instance, "vdi").AndRaise(exception.FlavorDiskSmallerThanImage( flavor_size=0, image_size=1)) self.mox.StubOutWithMock(self.session, 'call_xenapi') self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref") self.mox.StubOutWithMock(vm_utils, 'destroy_vdi') vm_utils.destroy_vdi(self.session, "ref").AndRaise(exception.StorageError(reason="")) self.mox.ReplayAll() self.assertRaises(exception.FlavorDiskSmallerThanImage, vm_utils._fetch_vhd_image, self.context, self.session, self.instance, 'image_id') self.mox.VerifyAll() def test_fallback_to_default_handler(self): cfg.CONF.import_opt('torrent_base_url', 'nova.virt.xenapi.image.bittorrent', group='xenserver') self.flags(torrent_base_url='http://foo', group='xenserver') self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent') vm_utils._image_uses_bittorrent( self.context, self.instance).AndReturn(True) self._stub_bittorrent_download_vhd(raise_exc=RuntimeError) vm_utils._make_uuid_stack().AndReturn(["uuid_stack"]) vm_utils.get_sr_path(self.session).AndReturn('sr_path') self._stub_glance_download_vhd() self.mox.StubOutWithMock(vm_utils, 'safe_find_sr') vm_utils.safe_find_sr(self.session).AndReturn("sr") self.mox.StubOutWithMock(vm_utils, '_scan_sr') vm_utils._scan_sr(self.session, "sr") self.mox.StubOutWithMock(vm_utils, '_check_vdi_size') vm_utils._check_vdi_size(self.context, self.session, self.instance, "vdi") self.mox.ReplayAll() self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context, self.session, self.instance, 'image_id')['root']['uuid']) self.mox.VerifyAll() def test_default_handler_does_not_fallback_to_itself(self): cfg.CONF.import_opt('torrent_base_url', 'nova.virt.xenapi.image.bittorrent', group='xenserver') self.flags(torrent_base_url='http://foo', group='xenserver') self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent') vm_utils._image_uses_bittorrent( self.context, self.instance).AndReturn(False) self._stub_glance_download_vhd(raise_exc=RuntimeError) self.mox.ReplayAll() self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image, self.context, self.session, self.instance, 'image_id') self.mox.VerifyAll() class TestImageCompression(VMUtilsTestBase): def test_image_compression(self): # Testing for nova.conf, too low, negative, and a correct value. 
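# get_compression_level() is expected to return None when the option is
# unset, zero or negative, and to echo back a valid positive level
# (image_compression_level=6 should yield 6 below).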
self.assertIsNone(vm_utils.get_compression_level()) self.flags(image_compression_level=0, group='xenserver') self.assertIsNone(vm_utils.get_compression_level()) self.flags(image_compression_level=-6, group='xenserver') self.assertIsNone(vm_utils.get_compression_level()) self.flags(image_compression_level=6, group='xenserver') self.assertEqual(vm_utils.get_compression_level(), 6) class ResizeHelpersTestCase(VMUtilsTestBase): def setUp(self): super(ResizeHelpersTestCase, self).setUp() self.context = context.RequestContext('user', 'project') def test_repair_filesystem(self): self.mox.StubOutWithMock(utils, 'execute') utils.execute('e2fsck', '-f', "-y", "fakepath", run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn( ("size is: 42", "")) self.mox.ReplayAll() vm_utils._repair_filesystem("fakepath") def _call_tune2fs_remove_journal(self, path): utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True) def _call_tune2fs_add_journal(self, path): utils.execute("tune2fs", "-j", path, run_as_root=True) def _call_parted_mkpart(self, path, start, end): utils.execute('parted', '--script', path, 'rm', '1', run_as_root=True) utils.execute('parted', '--script', path, 'mkpart', 'primary', '%ds' % start, '%ds' % end, run_as_root=True) def _call_parted_boot_flag(self, path): utils.execute('parted', '--script', path, 'set', '1', 'boot', 'on', run_as_root=True) def test_resize_part_and_fs_down_succeeds(self): self.mox.StubOutWithMock(vm_utils, "_repair_filesystem") self.mox.StubOutWithMock(utils, 'execute') dev_path = "/dev/fake" partition_path = "%s1" % dev_path vm_utils._repair_filesystem(partition_path) self._call_tune2fs_remove_journal(partition_path) utils.execute("resize2fs", partition_path, "10s", run_as_root=True) self._call_parted_mkpart(dev_path, 0, 9) self._call_parted_boot_flag(dev_path) self._call_tune2fs_add_journal(partition_path) self.mox.ReplayAll() vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot") def test_log_progress_if_required(self): self.mox.StubOutWithMock(vm_utils.LOG, "debug") vm_utils.LOG.debug("Sparse copy in progress, " "%(complete_pct).2f%% complete. 
" "%(left)s bytes left to copy", {"complete_pct": 50.0, "left": 1}) current = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(current)) time_fixture.advance_time_seconds( vm_utils.PROGRESS_INTERVAL_SECONDS + 1) self.mox.ReplayAll() vm_utils._log_progress_if_required(1, current, 2) def test_log_progress_if_not_required(self): self.mox.StubOutWithMock(vm_utils.LOG, "debug") current = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(current)) time_fixture.advance_time_seconds( vm_utils.PROGRESS_INTERVAL_SECONDS - 1) self.mox.ReplayAll() vm_utils._log_progress_if_required(1, current, 2) def test_resize_part_and_fs_down_fails_disk_too_big(self): self.mox.StubOutWithMock(vm_utils, "_repair_filesystem") self.mox.StubOutWithMock(utils, 'execute') dev_path = "/dev/fake" partition_path = "%s1" % dev_path new_sectors = 10 vm_utils._repair_filesystem(partition_path) self._call_tune2fs_remove_journal(partition_path) mobj = utils.execute("resize2fs", partition_path, "%ss" % new_sectors, run_as_root=True) mobj.AndRaise(processutils.ProcessExecutionError) self.mox.ReplayAll() self.assertRaises(exception.ResizeError, vm_utils._resize_part_and_fs, "fake", 0, 20, 10, "boot") def test_resize_part_and_fs_up_succeeds(self): self.mox.StubOutWithMock(vm_utils, "_repair_filesystem") self.mox.StubOutWithMock(utils, 'execute') dev_path = "/dev/fake" partition_path = "%s1" % dev_path vm_utils._repair_filesystem(partition_path) self._call_tune2fs_remove_journal(partition_path) self._call_parted_mkpart(dev_path, 0, 29) utils.execute("resize2fs", partition_path, run_as_root=True) self._call_tune2fs_add_journal(partition_path) self.mox.ReplayAll() vm_utils._resize_part_and_fs("fake", 0, 20, 30, "") def test_resize_disk_throws_on_zero_size(self): flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0) self.assertRaises(exception.ResizeError, vm_utils.resize_disk, "session", "instance", "vdi_ref", flavor) def test_auto_config_disk_returns_early_on_zero_size(self): vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0) @mock.patch.object(utils, "execute") def test_get_partitions(self, mock_execute): parted_return = "BYT;\n...\n" parted_return += "1:2s:11s:10s:ext3::boot;\n" parted_return += "2:20s:11s:10s::bob:;\n" mock_execute.return_value = (parted_return, None) partitions = vm_utils._get_partitions("abc") self.assertEqual(2, len(partitions)) self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0]) self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1]) class CheckVDISizeTestCase(VMUtilsTestBase): def setUp(self): super(CheckVDISizeTestCase, self).setUp() self.context = 'fakecontext' self.session = 'fakesession' self.instance = objects.Instance(uuid=str(uuid.uuid4())) self.flavor = objects.Flavor() self.vdi_uuid = 'fakeuuid' def test_not_too_large(self): self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size') vm_utils._get_vdi_chain_size(self.session, self.vdi_uuid).AndReturn(1073741824) self.mox.ReplayAll() with mock.patch.object(self.instance, 'get_flavor') as get: self.flavor.root_gb = 1 get.return_value = self.flavor vm_utils._check_vdi_size(self.context, self.session, self.instance, self.vdi_uuid) def test_too_large(self): self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size') vm_utils._get_vdi_chain_size(self.session, self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed self.mox.ReplayAll() with mock.patch.object(self.instance, 'get_flavor') as get: self.flavor.root_gb = 1 get.return_value = self.flavor 
self.assertRaises(exception.FlavorDiskSmallerThanImage, vm_utils._check_vdi_size, self.context, self.session, self.instance, self.vdi_uuid) def test_zero_root_gb_disables_check(self): with mock.patch.object(self.instance, 'get_flavor') as get: self.flavor.root_gb = 0 get.return_value = self.flavor vm_utils._check_vdi_size(self.context, self.session, self.instance, self.vdi_uuid) class GetInstanceForVdisForSrTestCase(VMUtilsTestBase): def setUp(self): super(GetInstanceForVdisForSrTestCase, self).setUp() self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') def test_get_instance_vdis_for_sr(self): vm_ref = fake.create_vm("foo", "Running") sr_ref = fake.create_sr() vdi_1 = fake.create_vdi('vdiname1', sr_ref) vdi_2 = fake.create_vdi('vdiname2', sr_ref) for vdi_ref in [vdi_1, vdi_2]: fake.create_vbd(vm_ref, vdi_ref) stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) result = list(vm_utils.get_instance_vdis_for_sr( driver._session, vm_ref, sr_ref)) self.assertEqual([vdi_1, vdi_2], result) def test_get_instance_vdis_for_sr_no_vbd(self): vm_ref = fake.create_vm("foo", "Running") sr_ref = fake.create_sr() stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) result = list(vm_utils.get_instance_vdis_for_sr( driver._session, vm_ref, sr_ref)) self.assertEqual([], result) class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase): def test_lookup_call(self): mock = mox.Mox() mock.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup('session', 'somename').AndReturn('ignored') mock.ReplayAll() vm_utils.vm_ref_or_raise('session', 'somename') mock.VerifyAll() def test_return_value(self): mock = mox.Mox() mock.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref') mock.ReplayAll() self.assertEqual( 'vmref', vm_utils.vm_ref_or_raise('session', 'somename')) mock.VerifyAll() class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase): def test_exception_raised(self): mock = mox.Mox() mock.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup('session', 'somename').AndReturn(None) mock.ReplayAll() self.assertRaises( exception.InstanceNotFound, lambda: vm_utils.vm_ref_or_raise('session', 'somename') ) mock.VerifyAll() def test_exception_msg_contains_vm_name(self): mock = mox.Mox() mock.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup('session', 'somename').AndReturn(None) mock.ReplayAll() try: vm_utils.vm_ref_or_raise('session', 'somename') except exception.InstanceNotFound as e: self.assertIn('somename', six.text_type(e)) mock.VerifyAll() @mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr') class CreateCachedImageTestCase(VMUtilsTestBase): def setUp(self): super(CreateCachedImageTestCase, self).setUp() self.session = _get_fake_session() @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref') def test_cached(self, mock_clone_vdi, mock_safe_find_sr): self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2}, None, None, None, 'vdi_uuid'] self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}), vm_utils._create_cached_image('context', self.session, 'instance', 'name', 'uuid', vm_utils.ImageType.DISK_VHD)) @mock.patch.object(vm_utils, '_safe_copy_vdi', 
return_value='new_vdi_ref') def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr): self.flags(use_cow_images=False) self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2}, None, None, None, 'vdi_uuid'] self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}), vm_utils._create_cached_image('context', self.session, 'instance', 'name', 'uuid', vm_utils.ImageType.DISK_VHD)) def test_no_cow_no_ext(self, mock_safe_find_sr): self.flags(use_cow_images=False) self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2}, 'vdi_ref', None, None, None, 'vdi_uuid'] self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}), vm_utils._create_cached_image('context', self.session, 'instance', 'name', 'uuid', vm_utils.ImageType.DISK_VHD)) @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref') @mock.patch.object(vm_utils, '_fetch_image', return_value={'root': {'uuid': 'vdi_uuid', 'file': None}}) def test_noncached(self, mock_fetch_image, mock_clone_vdi, mock_safe_find_sr): self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref', None, None, None, None, None, None, 'vdi_uuid'] self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}), vm_utils._create_cached_image('context', self.session, 'instance', 'name', 'uuid', vm_utils.ImageType.DISK_VHD)) class BittorrentTestCase(VMUtilsTestBase): def setUp(self): super(BittorrentTestCase, self).setUp() self.context = context.get_admin_context() def test_image_uses_bittorrent(self): instance = {'system_metadata': {'image_bittorrent': True}} self.flags(torrent_images='some', group='xenserver') self.assertTrue(vm_utils._image_uses_bittorrent(self.context, instance)) def _test_create_image(self, cache_type): instance = {'system_metadata': {'image_cache_in_nova': True}} self.flags(cache_images=cache_type, group='xenserver') was = {'called': None} def fake_create_cached_image(*args): was['called'] = 'some' return (False, {}) self.stubs.Set(vm_utils, '_create_cached_image', fake_create_cached_image) def fake_fetch_image(*args): was['called'] = 'none' return {} self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) vm_utils.create_image(self.context, None, instance, 'foo', 'bar', 'baz') self.assertEqual(was['called'], cache_type) def test_create_image_cached(self): self._test_create_image('some') def test_create_image_uncached(self): self._test_create_image('none') class ShutdownTestCase(VMUtilsTestBase): def test_hardshutdown_should_return_true_when_vm_is_shutdown(self): self.mock = mox.Mox() session = FakeSession() instance = "instance" vm_ref = "vm-ref" self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown') vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True) self.mock.StubOutWithMock(vm_utils, 'LOG') self.assertTrue(vm_utils.hard_shutdown_vm( session, instance, vm_ref)) def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self): self.mock = mox.Mox() session = FakeSession() instance = "instance" vm_ref = "vm-ref" self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown') vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True) self.mock.StubOutWithMock(vm_utils, 'LOG') self.assertTrue(vm_utils.clean_shutdown_vm( session, instance, vm_ref)) class CreateVBDTestCase(VMUtilsTestBase): def setUp(self): super(CreateVBDTestCase, self).setUp() self.session = FakeSession() self.mock = mox.Mox() self.mock.StubOutWithMock(self.session, 'call_xenapi') self.vbd_rec = self._generate_vbd_rec() def _generate_vbd_rec(self): vbd_rec = {} vbd_rec['VM'] = 'vm_ref' vbd_rec['VDI'] = 'vdi_ref' 
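# The remaining fields mirror the defaults create_vbd() is expected to pass
# to VBD.create for a plain read/write disk on userdevice 0.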
vbd_rec['userdevice'] = '0' vbd_rec['bootable'] = False vbd_rec['mode'] = 'RW' vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] return vbd_rec def test_create_vbd_default_args(self): self.session.call_xenapi('VBD.create', self.vbd_rec).AndReturn("vbd_ref") self.mock.ReplayAll() result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0) self.assertEqual(result, "vbd_ref") self.mock.VerifyAll() def test_create_vbd_osvol(self): self.session.call_xenapi('VBD.create', self.vbd_rec).AndReturn("vbd_ref") self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref", "osvol", "True") self.mock.ReplayAll() result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0, osvol=True) self.assertEqual(result, "vbd_ref") self.mock.VerifyAll() def test_create_vbd_extra_args(self): self.vbd_rec['VDI'] = 'OpaqueRef:NULL' self.vbd_rec['type'] = 'a' self.vbd_rec['mode'] = 'RO' self.vbd_rec['bootable'] = True self.vbd_rec['empty'] = True self.vbd_rec['unpluggable'] = False self.session.call_xenapi('VBD.create', self.vbd_rec).AndReturn("vbd_ref") self.mock.ReplayAll() result = vm_utils.create_vbd(self.session, "vm_ref", None, 0, vbd_type="a", read_only=True, bootable=True, empty=True, unpluggable=False) self.assertEqual(result, "vbd_ref") self.mock.VerifyAll() def test_attach_cd(self): self.mock.StubOutWithMock(vm_utils, 'create_vbd') vm_utils.create_vbd(self.session, "vm_ref", None, 1, vbd_type='cd', read_only=True, bootable=True, empty=True, unpluggable=False).AndReturn("vbd_ref") self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref") self.mock.ReplayAll() result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1) self.assertEqual(result, "vbd_ref") self.mock.VerifyAll() class UnplugVbdTestCase(VMUtilsTestBase): @mock.patch.object(greenthread, 'sleep') def test_unplug_vbd_works(self, mock_sleep): session = _get_fake_session() vbd_ref = "vbd_ref" vm_ref = 'vm_ref' vm_utils.unplug_vbd(session, vbd_ref, vm_ref) session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref) self.assertEqual(0, mock_sleep.call_count) def test_unplug_vbd_raises_unexpected_error(self): session = _get_fake_session() vbd_ref = "vbd_ref" vm_ref = 'vm_ref' session.call_xenapi.side_effect = test.TestingException() self.assertRaises(test.TestingException, vm_utils.unplug_vbd, session, vm_ref, vbd_ref) self.assertEqual(1, session.call_xenapi.call_count) def test_unplug_vbd_already_detached_works(self): error = "DEVICE_ALREADY_DETACHED" session = _get_fake_session(error) vbd_ref = "vbd_ref" vm_ref = 'vm_ref' vm_utils.unplug_vbd(session, vbd_ref, vm_ref) self.assertEqual(1, session.call_xenapi.call_count) def test_unplug_vbd_already_raises_unexpected_xenapi_error(self): session = _get_fake_session("") vbd_ref = "vbd_ref" vm_ref = 'vm_ref' self.assertRaises(exception.StorageError, vm_utils.unplug_vbd, session, vbd_ref, vm_ref) self.assertEqual(1, session.call_xenapi.call_count) def _test_uplug_vbd_retries(self, mock_sleep, error): session = _get_fake_session(error) vbd_ref = "vbd_ref" vm_ref = 'vm_ref' self.assertRaises(exception.StorageError, vm_utils.unplug_vbd, session, vm_ref, vbd_ref) self.assertEqual(11, session.call_xenapi.call_count) self.assertEqual(10, mock_sleep.call_count) def _test_uplug_vbd_retries_with_neg_val(self): session = _get_fake_session() self.flags(num_vbd_unplug_retries=-1, group='xenserver') vbd_ref = "vbd_ref" vm_ref 
= 'vm_ref' vm_utils.unplug_vbd(session, vbd_ref, vm_ref) self.assertEqual(1, session.call_xenapi.call_count) @mock.patch.object(greenthread, 'sleep') def test_uplug_vbd_retries_on_rejected(self, mock_sleep): self._test_uplug_vbd_retries(mock_sleep, "DEVICE_DETACH_REJECTED") @mock.patch.object(greenthread, 'sleep') def test_uplug_vbd_retries_on_internal_error(self, mock_sleep): self._test_uplug_vbd_retries(mock_sleep, "INTERNAL_ERROR") class VDIOtherConfigTestCase(VMUtilsTestBase): """Tests to ensure that the code is populating VDI's `other_config` attribute with the correct metadta. """ def setUp(self): super(VDIOtherConfigTestCase, self).setUp() class _FakeSession(object): def call_xenapi(self, operation, *args, **kwargs): # VDI.add_to_other_config -> VDI_add_to_other_config method = getattr(self, operation.replace('.', '_'), None) if method: return method(*args, **kwargs) self.operation = operation self.args = args self.kwargs = kwargs self.session = _FakeSession() self.context = context.get_admin_context() self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd', 'name': 'myinstance'} def test_create_vdi(self): # Some images are registered with XenServer explicitly by calling # `create_vdi` vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance, 'myvdi', 'root', 1024, read_only=True) expected = {'nova_disk_type': 'root', 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'} self.assertEqual(expected, self.session.args[0]['other_config']) def test_create_image(self): # Other images are registered implicitly when they are dropped into # the SR by a dom0 plugin or some other process self.flags(cache_images='none', group='xenserver') def fake_fetch_image(*args): return {'root': {'uuid': 'fake-uuid'}} self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) other_config = {} def VDI_add_to_other_config(ref, key, value): other_config[key] = value # Stubbing on the session object and not class so we don't pollute # other tests self.session.VDI_add_to_other_config = VDI_add_to_other_config self.session.VDI_get_other_config = lambda vdi: {} vm_utils.create_image(self.context, self.session, self.fake_instance, 'myvdi', 'image1', vm_utils.ImageType.DISK_VHD) expected = {'nova_disk_type': 'root', 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'} self.assertEqual(expected, other_config) def test_import_migrated_vhds(self): # Migrated images should preserve the `other_config` other_config = {} def VDI_add_to_other_config(ref, key, value): other_config[key] = value def call_plugin_serialized(*args, **kwargs): return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}} # Stubbing on the session object and not class so we don't pollute # other tests self.session.VDI_add_to_other_config = VDI_add_to_other_config self.session.VDI_get_other_config = lambda vdi: {} self.session.call_plugin_serialized = call_plugin_serialized self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None) self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None) vm_utils._import_migrated_vhds(self.session, self.fake_instance, "disk_label", "root", "vdi_label") expected = {'nova_disk_type': 'root', 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'} self.assertEqual(expected, other_config) class GenerateDiskTestCase(VMUtilsTestBase): def setUp(self): super(GenerateDiskTestCase, self).setUp() self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 
'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) self.session = driver._session self.session.is_local_connection = False self.vm_ref = fake.create_vm("foo", "Running") def tearDown(self): super(GenerateDiskTestCase, self).tearDown() fake.destroy_vm(self.vm_ref) def _expect_parted_calls(self): self.mox.StubOutWithMock(utils, "execute") self.mox.StubOutWithMock(utils, "trycmd") self.mox.StubOutWithMock(vm_utils, "destroy_vdi") self.mox.StubOutWithMock(vm_utils.os.path, "exists") if self.session.is_local_connection: utils.execute('parted', '--script', '/dev/fakedev', 'mklabel', 'msdos', check_exit_code=False, run_as_root=True) utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart', 'primary', '2048s', '-0', check_exit_code=False, run_as_root=True) vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True) utils.trycmd('kpartx', '-a', '/dev/fakedev', discard_warnings=True, run_as_root=True) else: utils.execute('parted', '--script', '/dev/fakedev', 'mklabel', 'msdos', check_exit_code=True, run_as_root=True) utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart', 'primary', '2048s', '-0', check_exit_code=True, run_as_root=True) def _check_vdi(self, vdi_ref, check_attached=True): vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref) self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"]) if check_attached: vbd_ref = vdi_rec["VBDs"][0] vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref) self.assertEqual(self.vm_ref, vbd_rec['VM']) else: self.assertEqual(0, len(vdi_rec["VBDs"])) @test_xenapi.stub_vm_utils_with_vdi_attached_here def test_generate_disk_with_no_fs_given(self): self._expect_parted_calls() self.mox.ReplayAll() vdi_ref = vm_utils._generate_disk( self.session, {"uuid": "fake_uuid"}, self.vm_ref, "2", "name", "user", 10, None, None) self._check_vdi(vdi_ref) @test_xenapi.stub_vm_utils_with_vdi_attached_here def test_generate_disk_swap(self): self._expect_parted_calls() utils.execute('mkswap', '/dev/fakedev1', run_as_root=True) self.mox.ReplayAll() vdi_ref = vm_utils._generate_disk( self.session, {"uuid": "fake_uuid"}, self.vm_ref, "2", "name", "swap", 10, "swap", None) self._check_vdi(vdi_ref) @test_xenapi.stub_vm_utils_with_vdi_attached_here def test_generate_disk_ephemeral(self): self._expect_parted_calls() utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral', '/dev/fakedev1', run_as_root=True) self.mox.ReplayAll() vdi_ref = vm_utils._generate_disk( self.session, {"uuid": "fake_uuid"}, self.vm_ref, "4", "name", "ephemeral", 10, "ext4", "ephemeral") self._check_vdi(vdi_ref) @test_xenapi.stub_vm_utils_with_vdi_attached_here def test_generate_disk_ensure_cleanup_called(self): self._expect_parted_calls() utils.execute( 'mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral', '/dev/fakedev1', run_as_root=True).AndRaise(test.TestingException) vm_utils.destroy_vdi( self.session, mox.IgnoreArg()).AndRaise(exception.StorageError(reason="")) self.mox.ReplayAll() self.assertRaises( test.TestingException, vm_utils._generate_disk, self.session, {"uuid": "fake_uuid"}, self.vm_ref, "4", "name", "ephemeral", 10, "ext4", "ephemeral") @test_xenapi.stub_vm_utils_with_vdi_attached_here def test_generate_disk_ephemeral_local_not_attached(self): self.session.is_local_connection = True self._expect_parted_calls() utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral', '/dev/mapper/fakedev1', 
run_as_root=True) self.mox.ReplayAll() vdi_ref = vm_utils._generate_disk( self.session, {"uuid": "fake_uuid"}, None, "4", "name", "ephemeral", 10, "ext4", "ephemeral") self._check_vdi(vdi_ref, check_attached=False) class GenerateEphemeralTestCase(VMUtilsTestBase): def setUp(self): super(GenerateEphemeralTestCase, self).setUp() self.session = "session" self.instance = "instance" self.vm_ref = "vm_ref" self.name_label = "name" self.ephemeral_name_label = "name ephemeral" self.userdevice = 4 self.fs_label = "ephemeral" self.mox.StubOutWithMock(vm_utils, "_generate_disk") self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis") def test_get_ephemeral_disk_sizes_simple(self): result = vm_utils.get_ephemeral_disk_sizes(20) expected = [20] self.assertEqual(expected, list(result)) def test_get_ephemeral_disk_sizes_three_disks_2000(self): result = vm_utils.get_ephemeral_disk_sizes(4030) expected = [2000, 2000, 30] self.assertEqual(expected, list(result)) def test_get_ephemeral_disk_sizes_two_disks_1024(self): result = vm_utils.get_ephemeral_disk_sizes(2048) expected = [1024, 1024] self.assertEqual(expected, list(result)) def _expect_generate_disk(self, size, device, name_label, fs_label): vm_utils._generate_disk( self.session, self.instance, self.vm_ref, str(device), name_label, 'ephemeral', size * 1024, None, fs_label).AndReturn(device) def test_generate_ephemeral_adds_one_disk(self): self._expect_generate_disk( 20, self.userdevice, self.ephemeral_name_label, self.fs_label) self.mox.ReplayAll() vm_utils.generate_ephemeral( self.session, self.instance, self.vm_ref, str(self.userdevice), self.name_label, 20) def test_generate_ephemeral_adds_multiple_disks(self): self._expect_generate_disk( 2000, self.userdevice, self.ephemeral_name_label, self.fs_label) self._expect_generate_disk( 2000, self.userdevice + 1, self.ephemeral_name_label + " (1)", self.fs_label + "1") self._expect_generate_disk( 30, self.userdevice + 2, self.ephemeral_name_label + " (2)", self.fs_label + "2") self.mox.ReplayAll() vm_utils.generate_ephemeral( self.session, self.instance, self.vm_ref, str(self.userdevice), self.name_label, 4030) def test_generate_ephemeral_cleans_up_on_error(self): self._expect_generate_disk( 1024, self.userdevice, self.ephemeral_name_label, self.fs_label) self._expect_generate_disk( 1024, self.userdevice + 1, self.ephemeral_name_label + " (1)", self.fs_label + "1") vm_utils._generate_disk( self.session, self.instance, self.vm_ref, str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral', units.Mi, None, 'ephemeral2').AndRaise(exception.NovaException) vm_utils.safe_destroy_vdis(self.session, [4, 5]) self.mox.ReplayAll() self.assertRaises( exception.NovaException, vm_utils.generate_ephemeral, self.session, self.instance, self.vm_ref, str(self.userdevice), self.name_label, 4096) class FakeFile(object): def __init__(self): self._file_operations = [] def seek(self, offset): self._file_operations.append((self.seek, offset)) class StreamDiskTestCase(VMUtilsTestBase): def setUp(self): super(StreamDiskTestCase, self).setUp() self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path') self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown') self.mox.StubOutWithMock(vm_utils, '_write_partition') # NOTE(matelakat): This might hide the fail reason, as test runners # are unhappy with a mocked out open. 
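# The tests below therefore hand vm_utils._stream_disk a FakeFile and assert
# only on the seek() offsets it records: offset 0 for a KERNEL image, and
# MBR_SIZE_BYTES for a DISK image, where a partition table is written first.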
self.mox.StubOutWithMock(six.moves.builtins, 'open') self.image_service_func = self.mox.CreateMockAnything() def test_non_ami(self): fake_file = FakeFile() vm_utils.utils.make_dev_path('dev').AndReturn('some_path') vm_utils.utils.temporary_chown( 'some_path').AndReturn(contextified(None)) open('some_path', 'wb').AndReturn(contextified(fake_file)) self.image_service_func(fake_file) self.mox.ReplayAll() vm_utils._stream_disk("session", self.image_service_func, vm_utils.ImageType.KERNEL, None, 'dev') self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations) def test_ami_disk(self): fake_file = FakeFile() vm_utils._write_partition("session", 100, 'dev') vm_utils.utils.make_dev_path('dev').AndReturn('some_path') vm_utils.utils.temporary_chown( 'some_path').AndReturn(contextified(None)) open('some_path', 'wb').AndReturn(contextified(fake_file)) self.image_service_func(fake_file) self.mox.ReplayAll() vm_utils._stream_disk("session", self.image_service_func, vm_utils.ImageType.DISK, 100, 'dev') self.assertEqual( [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)], fake_file._file_operations) class VMUtilsSRPath(VMUtilsTestBase): def setUp(self): super(VMUtilsSRPath, self).setUp() self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) self.session = driver._session self.session.is_local_connection = False def test_defined(self): self.mox.StubOutWithMock(vm_utils, "safe_find_sr") self.mox.StubOutWithMock(self.session, "call_xenapi") vm_utils.safe_find_sr(self.session).AndReturn("sr_ref") self.session.host_ref = "host_ref" self.session.call_xenapi('PBD.get_all_records_where', 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn( {'pbd_ref': {'device_config': {'path': 'sr_path'}}}) self.mox.ReplayAll() self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path") def test_default(self): self.mox.StubOutWithMock(vm_utils, "safe_find_sr") self.mox.StubOutWithMock(self.session, "call_xenapi") vm_utils.safe_find_sr(self.session).AndReturn("sr_ref") self.session.host_ref = "host_ref" self.session.call_xenapi('PBD.get_all_records_where', 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn( {'pbd_ref': {'device_config': {}}}) self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn( {'uuid': 'sr_uuid', 'type': 'ext'}) self.mox.ReplayAll() self.assertEqual(vm_utils.get_sr_path(self.session), "/var/run/sr-mount/sr_uuid") class CreateKernelRamdiskTestCase(VMUtilsTestBase): def setUp(self): super(CreateKernelRamdiskTestCase, self).setUp() self.context = "context" self.session = FakeSession() self.instance = {"kernel_id": None, "ramdisk_id": None} self.name_label = "name" self.mox.StubOutWithMock(self.session, "call_plugin") self.mox.StubOutWithMock(uuid, "uuid4") self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image") def test_create_kernel_and_ramdisk_no_create(self): self.mox.ReplayAll() result = vm_utils.create_kernel_and_ramdisk(self.context, self.session, self.instance, self.name_label) self.assertEqual((None, None), result) def test_create_kernel_and_ramdisk_create_both_cached(self): kernel_id = "kernel" ramdisk_id = "ramdisk" self.instance["kernel_id"] = kernel_id self.instance["ramdisk_id"] = 
ramdisk_id args_kernel = {} args_kernel['cached-image'] = kernel_id args_kernel['new-image-uuid'] = "fake_uuid1" uuid.uuid4().AndReturn("fake_uuid1") self.session.call_plugin('kernel', 'create_kernel_ramdisk', args_kernel).AndReturn("k") args_ramdisk = {} args_ramdisk['cached-image'] = ramdisk_id args_ramdisk['new-image-uuid'] = "fake_uuid2" uuid.uuid4().AndReturn("fake_uuid2") self.session.call_plugin('kernel', 'create_kernel_ramdisk', args_ramdisk).AndReturn("r") self.mox.ReplayAll() result = vm_utils.create_kernel_and_ramdisk(self.context, self.session, self.instance, self.name_label) self.assertEqual(("k", "r"), result) def test_create_kernel_and_ramdisk_create_kernel_not_cached(self): kernel_id = "kernel" self.instance["kernel_id"] = kernel_id args_kernel = {} args_kernel['cached-image'] = kernel_id args_kernel['new-image-uuid'] = "fake_uuid1" uuid.uuid4().AndReturn("fake_uuid1") self.session.call_plugin('kernel', 'create_kernel_ramdisk', args_kernel).AndReturn("") kernel = {"kernel": {"file": "k"}} vm_utils._fetch_disk_image(self.context, self.session, self.instance, self.name_label, kernel_id, 0).AndReturn(kernel) self.mox.ReplayAll() result = vm_utils.create_kernel_and_ramdisk(self.context, self.session, self.instance, self.name_label) self.assertEqual(("k", None), result) class ScanSrTestCase(VMUtilsTestBase): @mock.patch.object(vm_utils, "_scan_sr") @mock.patch.object(vm_utils, "safe_find_sr") def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr): mock_safe_find_sr.return_value = "sr_ref" self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session")) mock_scan_sr.assert_called_once_with("fake_session", "sr_ref") def test_scan_sr_works(self): session = mock.Mock() vm_utils._scan_sr(session, "sr_ref") session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref") def test_scan_sr_unknown_error_fails_once(self): session = mock.Mock() session.call_xenapi.side_effect = test.TestingException self.assertRaises(test.TestingException, vm_utils._scan_sr, session, "sr_ref") session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref") @mock.patch.object(greenthread, 'sleep') def test_scan_sr_known_error_retries_then_throws(self, mock_sleep): session = mock.Mock() class FakeException(Exception): details = ['SR_BACKEND_FAILURE_40', "", "", ""] session.XenAPI.Failure = FakeException session.call_xenapi.side_effect = FakeException self.assertRaises(FakeException, vm_utils._scan_sr, session, "sr_ref") session.call_xenapi.assert_called_with('SR.scan', "sr_ref") self.assertEqual(4, session.call_xenapi.call_count) mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)]) @mock.patch.object(greenthread, 'sleep') def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep): session = mock.Mock() class FakeException(Exception): details = ['SR_BACKEND_FAILURE_40', "", "", ""] session.XenAPI.Failure = FakeException def fake_call_xenapi(*args): fake_call_xenapi.count += 1 if fake_call_xenapi.count != 2: raise FakeException() fake_call_xenapi.count = 0 session.call_xenapi.side_effect = fake_call_xenapi vm_utils._scan_sr(session, "sr_ref") session.call_xenapi.assert_called_with('SR.scan', "sr_ref") self.assertEqual(2, session.call_xenapi.call_count) mock_sleep.assert_called_once_with(2) @mock.patch.object(flavors, 'extract_flavor', return_value={ 'memory_mb': 1024, 'vcpus': 1, 'vcpu_weight': 1.0, }) class CreateVmTestCase(VMUtilsTestBase): def test_vss_provider(self, mock_extract): self.flags(vcpu_pin_set="2,3") session = _get_fake_session() instance = 
objects.Instance(uuid="uuid", os_type="windows", system_metadata={}) with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor._from_db_object( None, objects.Flavor(), test_flavor.fake_flavor) vm_utils.create_vm(session, instance, "label", "kernel", "ramdisk") vm_rec = { 'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'}, 'PV_args': '', 'memory_static_min': '0', 'ha_restart_priority': '', 'HVM_boot_policy': 'BIOS order', 'PV_bootloader': '', 'tags': [], 'VCPUs_max': '4', 'memory_static_max': '1073741824', 'actions_after_shutdown': 'destroy', 'memory_dynamic_max': '1073741824', 'user_version': '0', 'xenstore_data': {'vm-data/allowvssprovider': 'false'}, 'blocked_operations': {}, 'is_a_template': False, 'name_description': '', 'memory_dynamic_min': '1073741824', 'actions_after_crash': 'destroy', 'memory_target': '1073741824', 'PV_ramdisk': '', 'PV_bootloader_args': '', 'PCI_bus': '', 'other_config': {'nova_uuid': 'uuid'}, 'name_label': 'label', 'actions_after_reboot': 'restart', 'VCPUs_at_startup': '4', 'HVM_boot_params': {'order': 'dc'}, 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true', 'timeoffset': '0', 'viridian': 'true', 'acpi': 'true'}, 'PV_legacy_args': '', 'PV_kernel': '', 'affinity': '', 'recommendations': '', 'ha_always_run': False } session.call_xenapi.assert_called_once_with("VM.create", vm_rec) def test_invalid_cpu_mask_raises(self, mock_extract): self.flags(vcpu_pin_set="asdf") session = mock.Mock() instance = objects.Instance(uuid=str(uuid.uuid4()), system_metadata={}) with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor._from_db_object( None, objects.Flavor(), test_flavor.fake_flavor) self.assertRaises(exception.Invalid, vm_utils.create_vm, session, instance, "label", "kernel", "ramdisk") def test_destroy_vm(self, mock_extract): session = mock.Mock() instance = objects.Instance(uuid=str(uuid.uuid4())) vm_utils.destroy_vm(session, instance, "vm_ref") session.VM.destroy.assert_called_once_with("vm_ref") def test_destroy_vm_silently_fails(self, mock_extract): session = mock.Mock() exc = test.TestingException() session.XenAPI.Failure = test.TestingException session.VM.destroy.side_effect = exc instance = objects.Instance(uuid=str(uuid.uuid4())) vm_utils.destroy_vm(session, instance, "vm_ref") session.VM.destroy.assert_called_once_with("vm_ref") class DetermineVmModeTestCase(VMUtilsTestBase): def _fake_object(self, updates): return fake_instance.fake_instance_obj(None, **updates) def test_determine_vm_mode_returns_xen_mode(self): instance = self._fake_object({"vm_mode": "xen"}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_hvm_mode(self): instance = self._fake_object({"vm_mode": "hvm"}) self.assertEqual(vm_mode.HVM, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_xen_for_linux(self): instance = self._fake_object({"vm_mode": None, "os_type": "linux"}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_hvm_for_windows(self): instance = self._fake_object({"vm_mode": None, "os_type": "windows"}) self.assertEqual(vm_mode.HVM, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_hvm_by_default(self): instance = self._fake_object({"vm_mode": None, "os_type": None}) self.assertEqual(vm_mode.HVM, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_xen_for_VHD(self): instance = self._fake_object({"vm_mode": 
None, "os_type": None}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD)) def test_determine_vm_mode_returns_xen_for_DISK(self): instance = self._fake_object({"vm_mode": None, "os_type": None}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK)) class CallXenAPIHelpersTestCase(VMUtilsTestBase): def test_vm_get_vbd_refs(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref")) session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref") def test_vbd_get_rec(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref")) session.call_xenapi.assert_called_once_with("VBD.get_record", "vbd_ref") def test_vdi_get_rec(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref")) session.call_xenapi.assert_called_once_with("VDI.get_record", "vdi_ref") def test_vdi_snapshot(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref")) session.call_xenapi.assert_called_once_with("VDI.snapshot", "vdi_ref", {}) def test_vdi_get_virtual_size(self): session = mock.Mock() session.call_xenapi.return_value = "123" self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref")) session.call_xenapi.assert_called_once_with("VDI.get_virtual_size", "ref") @mock.patch.object(vm_utils, '_get_resize_func_name') def test_vdi_resize(self, mock_get_resize_func_name): session = mock.Mock() mock_get_resize_func_name.return_value = "VDI.fake" vm_utils._vdi_resize(session, "ref", 123) session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123") @mock.patch.object(vm_utils, '_vdi_resize') @mock.patch.object(vm_utils, '_vdi_get_virtual_size') def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize): mock_get_size.return_value = (1024 ** 3) - 1 instance = {"uuid": "a"} vm_utils.update_vdi_virtual_size("s", instance, "ref", 1) mock_get_size.assert_called_once_with("s", "ref") mock_resize.assert_called_once_with("s", "ref", 1024 ** 3) @mock.patch.object(vm_utils, '_vdi_resize') @mock.patch.object(vm_utils, '_vdi_get_virtual_size') def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size, mock_resize): mock_get_size.return_value = 1024 ** 3 instance = {"uuid": "a"} vm_utils.update_vdi_virtual_size("s", instance, "ref", 1) mock_get_size.assert_called_once_with("s", "ref") self.assertFalse(mock_resize.called) @mock.patch.object(vm_utils, '_vdi_resize') @mock.patch.object(vm_utils, '_vdi_get_virtual_size') def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size, mock_resize): mock_get_size.return_value = 1024 ** 3 + 1 instance = {"uuid": "a"} self.assertRaises(exception.ResizeError, vm_utils.update_vdi_virtual_size, "s", instance, "ref", 1) mock_get_size.assert_called_once_with("s", "ref") self.assertFalse(mock_resize.called) @mock.patch.object(vm_utils, '_vdi_get_rec') @mock.patch.object(vm_utils, '_vbd_get_rec') @mock.patch.object(vm_utils, '_vm_get_vbd_refs') class GetVdiForVMTestCase(VMUtilsTestBase): def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_rec): session = "session" vm_get_vbd_refs.return_value = ["a", "b"] vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'} vdi_get_rec.return_value = {} result = 
vm_utils.get_vdi_for_vm_safely(session, "vm_ref") self.assertEqual(('vdi_ref', {}), result) vm_get_vbd_refs.assert_called_once_with(session, "vm_ref") vbd_get_rec.assert_called_once_with(session, "a") vdi_get_rec.assert_called_once_with(session, "vdi_ref") def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_rec): session = "session" vm_get_vbd_refs.return_value = ["a", "b"] vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'} self.assertRaises(exception.NovaException, vm_utils.get_vdi_for_vm_safely, session, "vm_ref", userdevice='1') self.assertEqual([], vdi_get_rec.call_args_list) self.assertEqual(2, len(vbd_get_rec.call_args_list)) @mock.patch.object(vm_utils, '_vdi_get_uuid') @mock.patch.object(vm_utils, '_vbd_get_rec') @mock.patch.object(vm_utils, '_vm_get_vbd_refs') class GetAllVdiForVMTestCase(VMUtilsTestBase): def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid): def fake_vbd_get_rec(session, vbd_ref): return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref} def fake_vdi_get_uuid(session, vdi_ref): return vdi_ref vm_get_vbd_refs.return_value = ["0", "2"] vbd_get_rec.side_effect = fake_vbd_get_rec vdi_get_uuid.side_effect = fake_vdi_get_uuid def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid): self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid) result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref") expected = ['vdi_ref_0', 'vdi_ref_2'] self.assertEqual(expected, list(result)) def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid): self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid) result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref", min_userdevice=1) expected = ["vdi_ref_2"] self.assertEqual(expected, list(result)) class GetAllVdisTestCase(VMUtilsTestBase): def test_get_all_vdis_in_sr(self): def fake_get_rec(record_type, ref): if ref == "2": return "vdi_rec_2" session = mock.Mock() session.call_xenapi.return_value = ["1", "2"] session.get_rec.side_effect = fake_get_rec sr_ref = "sr_ref" actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref)) self.assertEqual(actual, [('2', 'vdi_rec_2')]) session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref) class VDIAttachedHere(VMUtilsTestBase): @mock.patch.object(vm_utils, 'destroy_vbd') @mock.patch.object(vm_utils, '_get_this_vm_ref') @mock.patch.object(vm_utils, 'create_vbd') @mock.patch.object(vm_utils, '_remap_vbd_dev') @mock.patch.object(vm_utils, '_wait_for_device') @mock.patch.object(utils, 'execute') def test_sync_called(self, mock_execute, mock_wait_for_device, mock_remap_vbd_dev, mock_create_vbd, mock_get_this_vm_ref, mock_destroy_vbd): session = _get_fake_session() with vm_utils.vdi_attached_here(session, 'vdi_ref'): pass mock_execute.assert_called_with('sync', run_as_root=True) class SnapshotAttachedHereTestCase(VMUtilsTestBase): @mock.patch.object(vm_utils, '_snapshot_attached_here_impl') def test_snapshot_attached_here(self, mock_impl): def fake_impl(session, instance, vm_ref, label, userdevice, post_snapshot_callback): self.assertEqual("session", session) self.assertEqual("instance", instance) self.assertEqual("vm_ref", vm_ref) self.assertEqual("label", label) self.assertEqual('0', userdevice) self.assertIsNone(post_snapshot_callback) yield "fake" mock_impl.side_effect = fake_impl with vm_utils.snapshot_attached_here("session", "instance", "vm_ref", "label") as result: 
self.assertEqual("fake", result) mock_impl.assert_called_once_with("session", "instance", "vm_ref", "label", '0', None) @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain') @mock.patch.object(vm_utils, 'safe_destroy_vdis') @mock.patch.object(vm_utils, '_walk_vdi_chain') @mock.patch.object(vm_utils, '_wait_for_vhd_coalesce') @mock.patch.object(vm_utils, '_vdi_get_uuid') @mock.patch.object(vm_utils, '_vdi_snapshot') @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely, mock_vdi_snapshot, mock_vdi_get_uuid, mock_wait_for_vhd_coalesce, mock_walk_vdi_chain, mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain): session = "session" instance = {"uuid": "uuid"} mock_callback = mock.Mock() mock_get_vdi_for_vm_safely.return_value = ("vdi_ref", {"SR": "sr_ref", "uuid": "vdi_uuid"}) mock_vdi_snapshot.return_value = "snap_ref" mock_vdi_get_uuid.return_value = "snap_uuid" mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}] try: with vm_utils.snapshot_attached_here(session, instance, "vm_ref", "label", '2', mock_callback) as result: self.assertEqual(["a", "b"], result) raise test.TestingException() self.assertTrue(False) except test.TestingException: pass mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref", '2') mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref") mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance, "sr_ref", "vdi_ref", ['a', 'b']) mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref") mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"), mock.call(session, "snap_uuid")]) mock_callback.assert_called_once_with( task_state="image_pending_upload") mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"]) mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session, instance, ['a', 'b'], "sr_ref") @mock.patch.object(greenthread, 'sleep') def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep): instance = {"uuid": "fake"} vm_utils._wait_for_vhd_coalesce("session", instance, "sr_ref", "vdi_ref", ["uuid"]) self.assertFalse(mock_sleep.called) @mock.patch.object(vm_utils, '_count_children') @mock.patch.object(greenthread, 'sleep') def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep, mock_count): mock_count.return_value = 2 instance = {"uuid": "fake"} vm_utils._wait_for_vhd_coalesce("session", instance, "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) self.assertFalse(mock_sleep.called) self.assertTrue(mock_count.called) @mock.patch.object(greenthread, 'sleep') @mock.patch.object(vm_utils, '_get_vhd_parent_uuid') @mock.patch.object(vm_utils, '_count_children') @mock.patch.object(vm_utils, '_scan_sr') def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr, mock_count, mock_get_vhd_parent_uuid, mock_sleep): mock_count.return_value = 1 instance = {"uuid": "fake"} self.assertRaises(exception.NovaException, vm_utils._wait_for_vhd_coalesce, "session", instance, "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) self.assertTrue(mock_count.called) self.assertEqual(20, mock_sleep.call_count) self.assertEqual(20, mock_scan_sr.call_count) @mock.patch.object(greenthread, 'sleep') @mock.patch.object(vm_utils, '_get_vhd_parent_uuid') @mock.patch.object(vm_utils, '_count_children') @mock.patch.object(vm_utils, '_scan_sr') def test_wait_for_vhd_coalesce_success(self, mock_scan_sr, mock_count, mock_get_vhd_parent_uuid, mock_sleep): mock_count.return_value = 1 instance = {"uuid": "fake"} mock_get_vhd_parent_uuid.side_effect = ["bad", 
"uuid2"] vm_utils._wait_for_vhd_coalesce("session", instance, "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) self.assertEqual(1, mock_sleep.call_count) self.assertEqual(2, mock_scan_sr.call_count) @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_count_children(self, mock_get_all_vdis_in_sr): vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}), ('child2', {'sm_config': {'vhd-parent': 'parent2'}}), ('child3', {'sm_config': {'vhd-parent': 'parent1'}})] mock_get_all_vdis_in_sr.return_value = vdis self.assertEqual(2, vm_utils._count_children('session', 'parent1', 'sr')) class ImportMigratedDisksTestCase(VMUtilsTestBase): @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks') @mock.patch.object(vm_utils, '_import_migrated_root_disk') def test_import_all_migrated_disks(self, mock_root, mock_ephemeral): session = "session" instance = "instance" mock_root.return_value = "root_vdi" mock_ephemeral.return_value = ["a", "b"] result = vm_utils.import_all_migrated_disks(session, instance) expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]} self.assertEqual(expected, result) mock_root.assert_called_once_with(session, instance) mock_ephemeral.assert_called_once_with(session, instance) @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks') @mock.patch.object(vm_utils, '_import_migrated_root_disk') def test_import_all_migrated_disks_import_root_false(self, mock_root, mock_ephemeral): session = "session" instance = "instance" mock_root.return_value = "root_vdi" mock_ephemeral.return_value = ["a", "b"] result = vm_utils.import_all_migrated_disks(session, instance, import_root=False) expected = {'root': None, 'ephemerals': ["a", "b"]} self.assertEqual(expected, result) self.assertEqual(0, mock_root.call_count) mock_ephemeral.assert_called_once_with(session, instance) @mock.patch.object(vm_utils, '_import_migrated_vhds') def test_import_migrated_root_disk(self, mock_migrate): mock_migrate.return_value = "foo" instance = {"uuid": "uuid", "name": "name"} result = vm_utils._import_migrated_root_disk("s", instance) self.assertEqual("foo", result) mock_migrate.assert_called_once_with("s", instance, "uuid", "root", "name") @mock.patch.object(vm_utils, '_import_migrated_vhds') def test_import_migrate_ephemeral_disks(self, mock_migrate): mock_migrate.return_value = "foo" instance = objects.Instance(id=1, uuid=uuidutils.generate_uuid()) instance.old_flavor = objects.Flavor(ephemeral_gb=4000) result = vm_utils._import_migrate_ephemeral_disks("s", instance) self.assertEqual({'4': 'foo', '5': 'foo'}, result) inst_uuid = instance.uuid inst_name = instance.name expected_calls = [mock.call("s", instance, "%s_ephemeral_1" % inst_uuid, "ephemeral", "%s ephemeral (1)" % inst_name), mock.call("s", instance, "%s_ephemeral_2" % inst_uuid, "ephemeral", "%s ephemeral (2)" % inst_name)] self.assertEqual(expected_calls, mock_migrate.call_args_list) @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') def test_import_migrate_ephemeral_disks_use_old_flavor(self, mock_get_sizes): mock_get_sizes.return_value = [] instance = objects.Instance(id=1, uuid=uuidutils.generate_uuid(), ephemeral_gb=2000) instance.old_flavor = objects.Flavor(ephemeral_gb=4000) vm_utils._import_migrate_ephemeral_disks("s", instance) mock_get_sizes.assert_called_once_with(4000) @mock.patch.object(vm_utils, '_set_vdi_info') @mock.patch.object(vm_utils, 'scan_default_sr') @mock.patch.object(vm_utils, 'get_sr_path') def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr, mock_set_info): session = mock.Mock() instance 
= {"uuid": "uuid"} session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}} session.call_xenapi.return_value = "vdi_ref" mock_get_sr_path.return_value = "sr_path" result = vm_utils._import_migrated_vhds(session, instance, 'chain_label', 'disk_type', 'vdi_label') expected = {'uuid': "a", 'ref': "vdi_ref"} self.assertEqual(expected, result) mock_get_sr_path.assert_called_once_with(session) session.call_plugin_serialized.assert_called_once_with('migration', 'move_vhds_into_sr', instance_uuid='chain_label', sr_path='sr_path', uuid_stack=mock.ANY) mock_scan_sr.assert_called_once_with(session) session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a') mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type', 'vdi_label', 'disk_type', instance) def test_get_vhd_parent_uuid_rec_provided(self): session = mock.Mock() vdi_ref = 'vdi_ref' vdi_rec = {'sm_config': {}} self.assertIsNone(vm_utils._get_vhd_parent_uuid(session, vdi_ref, vdi_rec)) self.assertFalse(session.call_xenapi.called) class MigrateVHDTestCase(VMUtilsTestBase): def _assert_transfer_called(self, session, label): session.call_plugin_serialized.assert_called_once_with( 'migration', 'transfer_vhd', instance_uuid=label, host="dest", vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2) def test_migrate_vhd_root(self): session = mock.Mock() instance = {"uuid": "a"} vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest", "sr_path", 2) self._assert_transfer_called(session, "a") def test_migrate_vhd_ephemeral(self): session = mock.Mock() instance = {"uuid": "a"} vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest", "sr_path", 2, 2) self._assert_transfer_called(session, "a_ephemeral_2") def test_migrate_vhd_converts_exceptions(self): session = mock.Mock() session.XenAPI.Failure = test.TestingException session.call_plugin_serialized.side_effect = test.TestingException() instance = {"uuid": "a"} self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd, session, instance, "vdi_uuid", "dest", "sr_path", 2) self._assert_transfer_called(session, "a") class StripBaseMirrorTestCase(VMUtilsTestBase): def test_strip_base_mirror_from_vdi_works(self): session = mock.Mock() vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref") session.call_xenapi.assert_called_once_with( "VDI.remove_from_sm_config", "vdi_ref", "base_mirror") def test_strip_base_mirror_from_vdi_hides_error(self): session = mock.Mock() session.XenAPI.Failure = test.TestingException session.call_xenapi.side_effect = test.TestingException() vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref") session.call_xenapi.assert_called_once_with( "VDI.remove_from_sm_config", "vdi_ref", "base_mirror") @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi') def test_strip_base_mirror_from_vdis(self, mock_strip): def call_xenapi(method, arg): if method == "VM.get_VBDs": return ['VBD_ref_1', 'VBD_ref_2'] if method == "VBD.get_VDI": return 'VDI' + arg[3:] return "Unexpected call_xenapi: %s.%s" % (method, arg) session = mock.Mock() session.call_xenapi.side_effect = call_xenapi vm_utils.strip_base_mirror_from_vdis(session, "vm_ref") expected = [mock.call('VM.get_VBDs', "vm_ref"), mock.call('VBD.get_VDI', "VBD_ref_1"), mock.call('VBD.get_VDI', "VBD_ref_2")] self.assertEqual(expected, session.call_xenapi.call_args_list) expected = [mock.call(session, "VDI_ref_1"), mock.call(session, "VDI_ref_2")] self.assertEqual(expected, mock_strip.call_args_list) class DeviceIdTestCase(VMUtilsTestBase): def 
test_device_id_is_none_if_not_specified_in_meta_data(self): image_meta = objects.ImageMeta.from_dict({}) session = mock.Mock() session.product_version = (6, 1, 0) self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta)) def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self): image_meta = objects.ImageMeta.from_dict( {'properties': {'xenapi_device_id': '0002'}}) session = mock.Mock() session.product_version = (6, 2, 0) self.assertEqual(2, vm_utils.get_vm_device_id(session, image_meta)) session.product_version = (6, 3, 1) self.assertEqual(2, vm_utils.get_vm_device_id(session, image_meta)) def test_raise_exception_if_device_id_not_supported_by_hyp_version(self): image_meta = objects.ImageMeta.from_dict( {'properties': {'xenapi_device_id': '0002'}}) session = mock.Mock() session.product_version = (6, 0) exc = self.assertRaises(exception.NovaException, vm_utils.get_vm_device_id, session, image_meta) self.assertEqual("Device id 2 specified is not supported by " "hypervisor version (6, 0)", exc.message) session.product_version = ('6a') exc = self.assertRaises(exception.NovaException, vm_utils.get_vm_device_id, session, image_meta) self.assertEqual("Device id 2 specified is not supported by " "hypervisor version 6a", exc.message) class CreateVmRecordTestCase(VMUtilsTestBase): @mock.patch.object(flavors, 'extract_flavor') def test_create_vm_record_linux(self, mock_extract_flavor): instance = objects.Instance(uuid="uuid123", os_type="linux") self._test_create_vm_record(mock_extract_flavor, instance, False) @mock.patch.object(flavors, 'extract_flavor') def test_create_vm_record_windows(self, mock_extract_flavor): instance = objects.Instance(uuid="uuid123", os_type="windows") with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor._from_db_object( None, objects.Flavor(), test_flavor.fake_flavor) self._test_create_vm_record(mock_extract_flavor, instance, True) def _test_create_vm_record(self, mock_extract_flavor, instance, is_viridian): session = _get_fake_session() flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2} mock_extract_flavor.return_value = flavor with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor(memory_mb=1024, vcpus=1, vcpu_weight=2) vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk", device_id=2) is_viridian_str = str(is_viridian).lower() expected_vm_rec = { 'VCPUs_params': {'cap': '0', 'weight': '2'}, 'PV_args': '', 'memory_static_min': '0', 'ha_restart_priority': '', 'HVM_boot_policy': 'BIOS order', 'PV_bootloader': '', 'tags': [], 'VCPUs_max': '1', 'memory_static_max': '1073741824', 'actions_after_shutdown': 'destroy', 'memory_dynamic_max': '1073741824', 'user_version': '0', 'xenstore_data': {'vm-data/allowvssprovider': 'false'}, 'blocked_operations': {}, 'is_a_template': False, 'name_description': '', 'memory_dynamic_min': '1073741824', 'actions_after_crash': 'destroy', 'memory_target': '1073741824', 'PV_ramdisk': '', 'PV_bootloader_args': '', 'PCI_bus': '', 'other_config': {'nova_uuid': 'uuid123'}, 'name_label': 'name', 'actions_after_reboot': 'restart', 'VCPUs_at_startup': '1', 'HVM_boot_params': {'order': 'dc'}, 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true', 'timeoffset': '0', 'viridian': is_viridian_str, 'acpi': 'true', 'device_id': '0002'}, 'PV_legacy_args': '', 'PV_kernel': '', 'affinity': '', 'recommendations': '', 'ha_always_run': False} session.call_xenapi.assert_called_with('VM.create', expected_vm_rec) def test_list_vms(self): self.fixture = 
self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') fake.create_vm("foo1", "Halted") vm_ref = fake.create_vm("foo2", "Running") stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) result = list(vm_utils.list_vms(driver._session)) # Will have 3 VMs - but one is Dom0 and one is not running on the host self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3) self.assertEqual(len(result), 1) result_keys = [key for (key, value) in result] self.assertIn(vm_ref, result_keys) class ChildVHDsTestCase(test.NoDBTestCase): all_vdis = [ ("my-vdi-ref", {"uuid": "my-uuid", "sm_config": {}, "is_a_snapshot": False, "other_config": {}}), ("non-parent", {"uuid": "uuid-1", "sm_config": {}, "is_a_snapshot": False, "other_config": {}}), ("diff-parent", {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"}, "is_a_snapshot": False, "other_config": {}}), ("child", {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"}, "is_a_snapshot": False, "other_config": {}}), ("child-snap", {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"}, "is_a_snapshot": True, "other_config": {}}), ] @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_child_vhds_defaults(self, mock_get_all): mock_get_all.return_value = self.all_vdis result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"]) self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result) @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_child_vhds_only_snapshots(self, mock_get_all): mock_get_all.return_value = self.all_vdis result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"], old_snapshots_only=True) self.assertEqual(['uuid-child-snap'], result) @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_child_vhds_chain(self, mock_get_all): mock_get_all.return_value = self.all_vdis result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid", "other-uuid"], old_snapshots_only=True) self.assertEqual(['uuid-child-snap'], result) def test_is_vdi_a_snapshot_works(self): vdi_rec = {"is_a_snapshot": True, "other_config": {}} self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec)) def test_is_vdi_a_snapshot_base_images_false(self): vdi_rec = {"is_a_snapshot": True, "other_config": {"image-id": "fake"}} self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) def test_is_vdi_a_snapshot_false_for_non_snapshot(self): vdi_rec = {"is_a_snapshot": False, "other_config": {}} self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) class RemoveOldSnapshotsTestCase(test.NoDBTestCase): @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') @mock.patch.object(vm_utils, '_walk_vdi_chain') @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain') def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get): instance = {"uuid": "fake"} mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"}) mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}] vm_utils.remove_old_snapshots("session", instance, "vm_ref") mock_delete.assert_called_once_with("session", instance, ["uuid1", "uuid2"], "sr_ref") mock_get.assert_called_once_with("session", "vm_ref") mock_walk.assert_called_once_with("session", "vdi") @mock.patch.object(vm_utils, '_child_vhds') def 
test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child): instance = {"uuid": "fake"} vm_utils._delete_snapshots_in_vdi_chain("session", instance, ["uuid"], "sr") self.assertFalse(mock_child.called) @mock.patch.object(vm_utils, '_child_vhds') def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child): instance = {"uuid": "fake"} mock_child.return_value = [] vm_utils._delete_snapshots_in_vdi_chain("session", instance, ["uuid1", "uuid2"], "sr") mock_child.assert_called_once_with("session", "sr", ["uuid2"], old_snapshots_only=True) @mock.patch.object(vm_utils, '_scan_sr') @mock.patch.object(vm_utils, 'safe_destroy_vdis') @mock.patch.object(vm_utils, '_child_vhds') def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child, mock_destroy, mock_scan): instance = {"uuid": "fake"} mock_child.return_value = ["suuid1", "suuid2"] session = mock.Mock() session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"] vm_utils._delete_snapshots_in_vdi_chain(session, instance, ["uuid1", "uuid2"], "sr") mock_child.assert_called_once_with(session, "sr", ["uuid2"], old_snapshots_only=True) session.VDI.get_by_uuid.assert_has_calls([ mock.call("suuid1"), mock.call("suuid2")]) mock_destroy.assert_called_once_with(session, ["ref1", "ref2"]) mock_scan.assert_called_once_with(session, "sr") class ResizeFunctionTestCase(test.NoDBTestCase): def _call_get_resize_func_name(self, brand, version): session = mock.Mock() session.product_brand = brand session.product_version = version return vm_utils._get_resize_func_name(session) def _test_is_resize(self, brand, version): result = self._call_get_resize_func_name(brand, version) self.assertEqual("VDI.resize", result) def _test_is_resize_online(self, brand, version): result = self._call_get_resize_func_name(brand, version) self.assertEqual("VDI.resize_online", result) def test_xenserver_5_5(self): self._test_is_resize_online("XenServer", (5, 5, 0)) def test_xenserver_6_0(self): self._test_is_resize("XenServer", (6, 0, 0)) def test_xcp_1_1(self): self._test_is_resize_online("XCP", (1, 1, 0)) def test_xcp_1_2(self): self._test_is_resize("XCP", (1, 2, 0)) def test_xcp_2_0(self): self._test_is_resize("XCP", (2, 0, 0)) def test_random_brand(self): self._test_is_resize("asfd", (1, 1, 0)) def test_default(self): self._test_is_resize(None, None) def test_empty(self): self._test_is_resize("", "") def test_bad_version(self): self._test_is_resize("XenServer", "asdf") class VMInfoTests(VMUtilsTestBase): def setUp(self): super(VMInfoTests, self).setUp() self.session = mock.Mock() def test_get_power_state_valid(self): # Save on test setup calls by having these simple tests in one method self.session.call_xenapi.return_value = "Running" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.RUNNING) self.session.call_xenapi.return_value = "Halted" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.SHUTDOWN) self.session.call_xenapi.return_value = "Paused" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.PAUSED) self.session.call_xenapi.return_value = "Suspended" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.SUSPENDED) self.session.call_xenapi.return_value = "Crashed" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.CRASHED) def test_get_power_state_invalid(self): self.session.call_xenapi.return_value = "Invalid" self.assertRaises(KeyError, vm_utils.get_power_state, self.session, "ref") _XAPI_record = {'power_state': 'Running', 
'memory_static_max': str(10 << 10), 'memory_dynamic_max': str(9 << 10), 'VCPUs_max': '5'} def test_compile_info(self): def call_xenapi(method, *args): if method.startswith('VM.get_') and args[0] == 'dummy': return self._XAPI_record[method[7:]] self.session.call_xenapi.side_effect = call_xenapi info = vm_utils.compile_info(self.session, "dummy") self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING, max_mem_kb=10, mem_kb=9, num_cpu='5', cpu_time_ns=0), info) nova-13.1.4/nova/tests/unit/virt/xenapi/test_volume_utils.py0000664000567000056710000003074513064447152025417 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet import greenthread import mock import six from nova import exception from nova import test from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import volume_utils class SROps(stubs.XenAPITestBaseNoDB): def test_find_sr_valid_uuid(self): self.session = mock.Mock() self.session.call_xenapi.return_value = 'sr_ref' self.assertEqual(volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'), 'sr_ref') def test_find_sr_invalid_uuid(self): class UUIDException(Exception): details = ["UUID_INVALID", "", "", ""] self.session = mock.Mock() self.session.XenAPI.Failure = UUIDException self.session.call_xenapi.side_effect = UUIDException self.assertIsNone( volume_utils.find_sr_by_uuid(self.session, 'sr_uuid')) def test_find_sr_from_vdi(self): vdi_ref = 'fake-ref' def fake_call_xenapi(method, *args): self.assertEqual(method, 'VDI.get_SR') self.assertEqual(args[0], vdi_ref) return args[0] session = mock.Mock() session.call_xenapi.side_effect = fake_call_xenapi self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref), vdi_ref) def test_find_sr_from_vdi_exception(self): vdi_ref = 'fake-ref' class FakeException(Exception): pass session = mock.Mock() session.XenAPI.Failure = FakeException session.call_xenapi.side_effect = FakeException self.assertRaises(exception.StorageError, volume_utils.find_sr_from_vdi, session, vdi_ref) class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB): def test_target_host(self): self.assertEqual(volume_utils._get_target_host('host:port'), 'host') self.assertEqual(volume_utils._get_target_host('host'), 'host') # There is no default value self.assertIsNone(volume_utils._get_target_host(':port')) self.assertIsNone(volume_utils._get_target_host(None)) def test_target_port(self): self.assertEqual(volume_utils._get_target_port('host:port'), 'port') self.assertEqual(volume_utils._get_target_port('host'), '3260') class IntroduceTestCase(stubs.XenAPITestBaseNoDB): @mock.patch.object(volume_utils, '_get_vdi_ref') @mock.patch.object(greenthread, 'sleep') def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref): def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun): fake_get_vdi_ref.call_count += 1 if fake_get_vdi_ref.call_count == 2: return 'vdi_ref' def fake_call_xenapi(method, *args): if method == 'SR.scan': return elif method == 
'VDI.get_record': return {'managed': 'true'} session = mock.Mock() session.call_xenapi.side_effect = fake_call_xenapi mock_get_vdi_ref.side_effect = fake_get_vdi_ref fake_get_vdi_ref.call_count = 0 self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'), 'vdi_ref') mock_sleep.assert_called_once_with(20) @mock.patch.object(volume_utils, '_get_vdi_ref') @mock.patch.object(greenthread, 'sleep') def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref): def fake_call_xenapi(method, *args): if method == 'SR.scan': return elif method == 'VDI.get_record': return {'managed': 'true'} session = mock.Mock() session.call_xenapi.side_effect = fake_call_xenapi mock_get_vdi_ref.return_value = None self.assertRaises(exception.StorageError, volume_utils.introduce_vdi, session, 'sr_ref') mock_sleep.assert_called_once_with(20) class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB): def test_mountpoint_to_number(self): cases = { 'sda': 0, 'sdp': 15, 'hda': 0, 'hdp': 15, 'vda': 0, 'xvda': 0, '0': 0, '10': 10, 'vdq': -1, 'sdq': -1, 'hdq': -1, 'xvdq': -1, } for (input, expected) in six.iteritems(cases): actual = volume_utils._mountpoint_to_number(input) self.assertEqual(actual, expected, '%s yielded %s, not %s' % (input, actual, expected)) @classmethod def _make_connection_info(cls): target_iqn = 'iqn.2010-10.org.openstack:volume-00000001' return {'driver_volume_type': 'iscsi', 'data': {'volume_id': 1, 'target_iqn': target_iqn, 'target_portal': '127.0.0.1:3260,fake', 'target_lun': None, 'auth_method': 'CHAP', 'auth_username': 'username', 'auth_password': 'verybadpass'}} def test_parse_volume_info_parsing_auth_details(self): conn_info = self._make_connection_info() result = volume_utils._parse_volume_info(conn_info['data']) self.assertEqual('username', result['chapuser']) self.assertEqual('verybadpass', result['chappassword']) def test_parse_volume_info_missing_details(self): # Tests that a StorageError is raised if volume_id, target_host, or # target_iqn is missing from connection_data. Also ensures that the # auth_password value is not present in the StorageError message. 
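# Each required key is nulled in turn below so that every field of the
# connection data is validated independently.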
for data_key_to_null in ('volume_id', 'target_portal', 'target_iqn'): conn_info = self._make_connection_info() conn_info['data'][data_key_to_null] = None ex = self.assertRaises(exception.StorageError, volume_utils._parse_volume_info, conn_info['data']) self.assertNotIn('verybadpass', six.text_type(ex)) def test_get_device_number_raise_exception_on_wrong_mountpoint(self): self.assertRaises( exception.StorageError, volume_utils.get_device_number, 'dev/sd') class FindVBDTestCase(stubs.XenAPITestBaseNoDB): def test_find_vbd_by_number_works(self): session = mock.Mock() session.VM.get_VBDs.return_value = ["a", "b"] session.VBD.get_userdevice.return_value = "1" result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertEqual("a", result) session.VM.get_VBDs.assert_called_once_with("vm_ref") session.VBD.get_userdevice.assert_called_once_with("a") def test_find_vbd_by_number_no_matches(self): session = mock.Mock() session.VM.get_VBDs.return_value = ["a", "b"] session.VBD.get_userdevice.return_value = "3" result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertIsNone(result) session.VM.get_VBDs.assert_called_once_with("vm_ref") expected = [mock.call("a"), mock.call("b")] self.assertEqual(expected, session.VBD.get_userdevice.call_args_list) def test_find_vbd_by_number_no_vbds(self): session = mock.Mock() session.VM.get_VBDs.return_value = [] result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertIsNone(result) session.VM.get_VBDs.assert_called_once_with("vm_ref") self.assertFalse(session.VBD.get_userdevice.called) def test_find_vbd_by_number_ignores_exception(self): session = mock.Mock() session.XenAPI.Failure = test.TestingException session.VM.get_VBDs.return_value = ["a"] session.VBD.get_userdevice.side_effect = test.TestingException result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertIsNone(result) session.VM.get_VBDs.assert_called_once_with("vm_ref") session.VBD.get_userdevice.assert_called_once_with("a") class IntroduceSRTestCase(stubs.XenAPITestBaseNoDB): @mock.patch.object(volume_utils, '_create_pbd') def test_backend_kind(self, create_pbd): session = mock.Mock() session.product_version = (6, 5, 0) session.call_xenapi.return_value = 'sr_ref' params = {'sr_type': 'iscsi'} sr_uuid = 'sr_uuid' label = 'label' expected_params = {'backend-kind': 'vbd'} volume_utils.introduce_sr(session, sr_uuid, label, params) session.call_xenapi.assert_any_call('SR.introduce', sr_uuid, label, '', 'iscsi', '', False, expected_params) @mock.patch.object(volume_utils, '_create_pbd') def test_backend_kind_upstream_fix(self, create_pbd): session = mock.Mock() session.product_version = (7, 0, 0) session.call_xenapi.return_value = 'sr_ref' params = {'sr_type': 'iscsi'} sr_uuid = 'sr_uuid' label = 'label' expected_params = {} volume_utils.introduce_sr(session, sr_uuid, label, params) session.call_xenapi.assert_any_call('SR.introduce', sr_uuid, label, '', 'iscsi', '', False, expected_params) class BootedFromVolumeTestCase(stubs.XenAPITestBaseNoDB): def test_booted_from_volume(self): session = mock.Mock() session.VM.get_VBDs.return_value = ['vbd_ref'] session.VBD.get_userdevice.return_value = '0' session.VBD.get_other_config.return_value = {'osvol': True} booted_from_volume = volume_utils.is_booted_from_volume(session, 'vm_ref') self.assertTrue(booted_from_volume) def test_not_booted_from_volume(self): session = mock.Mock() session.VM.get_VBDs.return_value = ['vbd_ref'] session.VBD.get_userdevice.return_value = '0' 
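# No 'osvol' entry in other_config here, so the root disk is not
# volume-backed and is_booted_from_volume should return False.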
session.VBD.get_other_config.return_value = {} booted_from_volume = volume_utils.is_booted_from_volume(session, 'vm_ref') self.assertFalse(booted_from_volume) class MultipleVolumesTestCase(stubs.XenAPITestBaseNoDB): def test_sr_info_two_luns(self): data1 = {'target_portal': 'host:port', 'target_iqn': 'iqn', 'volume_id': 'vol_id_1', 'target_lun': 1} data2 = {'target_portal': 'host:port', 'target_iqn': 'iqn', 'volume_id': 'vol_id_2', 'target_lun': 2} (sr_uuid1, label1, params1) = volume_utils.parse_sr_info(data1) (sr_uuid2, label2, params2) = volume_utils.parse_sr_info(data2) self.assertEqual(sr_uuid1, sr_uuid2) self.assertEqual(label1, label2) @mock.patch.object(volume_utils, 'forget_sr') def test_purge_sr_no_VBDs(self, mock_forget): def _call_xenapi(func, *args): if func == 'SR.get_VDIs': return ['VDI1', 'VDI2'] if func == 'VDI.get_VBDs': return [] self.session = mock.Mock() self.session.call_xenapi = _call_xenapi volume_utils.purge_sr(self.session, 'SR') mock_forget.assert_called_once_with(self.session, 'SR') @mock.patch.object(volume_utils, 'forget_sr') def test_purge_sr_in_use(self, mock_forget): def _call_xenapi(func, *args): if func == 'SR.get_VDIs': return ['VDI1', 'VDI2'] if func == 'VDI.get_VBDs': if args[0] == 'VDI1': return ['VBD1'] if args[0] == 'VDI2': return ['VBD2'] self.session = mock.Mock() self.session.call_xenapi = _call_xenapi volume_utils.purge_sr(self.session, 'SR') self.assertEqual([], mock_forget.mock_calls) nova-13.1.4/nova/tests/unit/virt/xenapi/test_driver.py0000664000567000056710000001423613064447152024160 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import math import mock from oslo_utils import units from nova.compute import arch from nova.tests.unit.virt.xenapi import stubs from nova.virt import driver from nova.virt import fake from nova.virt import xenapi from nova.virt.xenapi import driver as xenapi_driver class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB): """Unit tests for Driver operations.""" def _get_driver(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) def host_stats(self, refresh=True): return {'host_memory_total': 3 * units.Mi, 'host_memory_free_computed': 2 * units.Mi, 'disk_total': 5 * units.Gi, 'disk_used': 2 * units.Gi, 'disk_allocated': 4 * units.Gi, 'host_hostname': 'somename', 'supported_instances': arch.X86_64, 'host_cpu_info': {'cpu_count': 50}, 'cpu_model': { 'vendor': 'GenuineIntel', 'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz', 'topology': { 'sockets': 1, 'cores': 4, 'threads': 1, }, 'features': [ 'fpu', 'de', 'tsc', 'msr', 'pae', 'mce', 'cx8', 'apic', 'sep', 'mtrr', 'mca', 'cmov', 'pat', 'clflush', 'acpi', 'mmx', 'fxsr', 'sse', 'sse2', 'ss', 'ht', 'nx', 'constant_tsc', 'nonstop_tsc', 'aperfmperf', 'pni', 'vmx', 'est', 'ssse3', 'sse4_1', 'sse4_2', 'popcnt', 'hypervisor', 'ida', 'tpr_shadow', 'vnmi', 'flexpriority', 'ept', 'vpid', ], }, 'vcpus_used': 10, 'pci_passthrough_devices': '', 'host_other-config': {'iscsi_iqn': 'someiqn'}} def test_available_resource(self): driver = self._get_driver() driver._session.product_version = (6, 8, 2) self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats) resources = driver.get_available_resource(None) self.assertEqual(6008002, resources['hypervisor_version']) self.assertEqual(50, resources['vcpus']) self.assertEqual(3, resources['memory_mb']) self.assertEqual(5, resources['local_gb']) self.assertEqual(10, resources['vcpus_used']) self.assertEqual(3 - 2, resources['memory_mb_used']) self.assertEqual(2, resources['local_gb_used']) self.assertEqual('XenServer', resources['hypervisor_type']) self.assertEqual('somename', resources['hypervisor_hostname']) self.assertEqual(1, resources['disk_available_least']) def test_overhead(self): driver = self._get_driver() instance = {'memory_mb': 30720, 'vcpus': 4} # expected memory overhead per: # https://wiki.openstack.org/wiki/XenServer/Overhead expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) + (instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) + xenapi_driver.OVERHEAD_BASE) expected = math.ceil(expected) overhead = driver.estimate_instance_overhead(instance) self.assertEqual(expected, overhead['memory_mb']) def test_set_bootable(self): driver = self._get_driver() self.mox.StubOutWithMock(driver._vmops, 'set_bootable') driver._vmops.set_bootable('inst', True) self.mox.ReplayAll() driver.set_bootable('inst', True) def test_post_interrupted_snapshot_cleanup(self): driver = self._get_driver() fake_vmops_cleanup = mock.Mock() driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup driver.post_interrupted_snapshot_cleanup("context", "instance") fake_vmops_cleanup.assert_called_once_with("context", "instance") def test_public_api_signatures(self): inst = self._get_driver() self.assertPublicAPISignatures(driver.ComputeDriver(None), inst) def test_get_volume_connector(self): ip = '123.123.123.123' driver = self._get_driver() self.flags(connection_url='http://%s' % ip, connection_password='test_pass', group='xenserver') 
self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats) connector = driver.get_volume_connector({'uuid': 'fake'}) self.assertIn('ip', connector) self.assertEqual(connector['ip'], ip) self.assertIn('initiator', connector) self.assertEqual(connector['initiator'], 'someiqn') def test_get_block_storage_ip(self): my_ip = '123.123.123.123' connection_ip = '124.124.124.124' driver = self._get_driver() self.flags(connection_url='http://%s' % connection_ip, group='xenserver') self.flags(my_ip=my_ip, my_block_storage_ip=my_ip) ip = driver._get_block_storage_ip() self.assertEqual(connection_ip, ip) def test_get_block_storage_ip_conf(self): driver = self._get_driver() my_ip = '123.123.123.123' my_block_storage_ip = '124.124.124.124' self.flags(my_ip=my_ip, my_block_storage_ip=my_block_storage_ip) ip = driver._get_block_storage_ip() self.assertEqual(my_block_storage_ip, ip) nova-13.1.4/nova/tests/unit/virt/xenapi/__init__.py0000664000567000056710000000000013064447141023343 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/test_vmops.py0000664000567000056710000022677113064447152024042 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet import greenthread import mock import uuid try: import xmlrpclib except ImportError: import six.moves.xmlrpc_client as xmlrpclib from nova.compute import power_state from nova.compute import task_states from nova import context from nova import exception from nova import objects from nova.objects import fields from nova.pci import manager as pci_manager from nova import test from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance from nova.tests.unit.virt.xenapi import stubs from nova import utils from nova.virt import fake from nova.virt.xenapi import agent as xenapi_agent from nova.virt.xenapi.client import session as xenapi_session from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import vm_utils from nova.virt.xenapi import vmops from nova.virt.xenapi import volume_utils from nova.virt.xenapi import volumeops class VMOpsTestBase(stubs.XenAPITestBaseNoDB): def setUp(self): super(VMOpsTestBase, self).setUp() self._setup_mock_vmops() self.vms = [] def _setup_mock_vmops(self, product_brand=None, product_version=None): stubs.stubout_session(self.stubs, xenapi_fake.SessionBase) self._session = xenapi_session.XenAPISession('test_url', 'root', 'test_pass') self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI()) def create_vm(self, name, state="Running"): vm_ref = xenapi_fake.create_vm(name, state) self.vms.append(vm_ref) vm = xenapi_fake.get_record("VM", vm_ref) return vm, vm_ref def tearDown(self): super(VMOpsTestBase, self).tearDown() for vm in self.vms: xenapi_fake.destroy_vm(vm) class VMOpsTestCase(VMOpsTestBase): def setUp(self): super(VMOpsTestCase, self).setUp() self._setup_mock_vmops() self.context = context.RequestContext('user', 'project') self.instance = 
fake_instance.fake_instance_obj(self.context) def _setup_mock_vmops(self, product_brand=None, product_version=None): self._session = self._get_mock_session(product_brand, product_version) self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI()) def _get_mock_session(self, product_brand, product_version): class Mock(object): pass mock_session = Mock() mock_session.product_brand = product_brand mock_session.product_version = product_version return mock_session def _test_finish_revert_migration_after_crash(self, backup_made, new_made, vm_shutdown=True): instance = {'name': 'foo', 'task_state': task_states.RESIZE_MIGRATING} context = 'fake_context' self.mox.StubOutWithMock(vm_utils, 'lookup') self.mox.StubOutWithMock(self._vmops, '_destroy') self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label') self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices') self.mox.StubOutWithMock(self._vmops, '_start') self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') vm_utils.lookup(self._session, 'foo-orig').AndReturn( backup_made and 'foo' or None) vm_utils.lookup(self._session, 'foo').AndReturn( (not backup_made or new_made) and 'foo' or None) if backup_made: if new_made: self._vmops._destroy(instance, 'foo') vm_utils.set_vm_name_label(self._session, 'foo', 'foo') self._vmops._attach_mapped_block_devices(instance, []) vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown) if vm_shutdown: self._vmops._start(instance, 'foo') self.mox.ReplayAll() self._vmops.finish_revert_migration(context, instance, []) def test_finish_revert_migration_after_crash(self): self._test_finish_revert_migration_after_crash(True, True) def test_finish_revert_migration_after_crash_before_new(self): self._test_finish_revert_migration_after_crash(True, False) def test_finish_revert_migration_after_crash_before_backup(self): self._test_finish_revert_migration_after_crash(False, False) def test_xsm_sr_check_relaxed_cached(self): self.make_plugin_call_count = 0 def fake_make_plugin_call(plugin, method, **args): self.make_plugin_call_count = self.make_plugin_call_count + 1 return "true" self.stubs.Set(self._vmops, "_make_plugin_call", fake_make_plugin_call) self.assertTrue(self._vmops._is_xsm_sr_check_relaxed()) self.assertTrue(self._vmops._is_xsm_sr_check_relaxed()) self.assertEqual(self.make_plugin_call_count, 1) def test_get_vm_opaque_ref_raises_instance_not_found(self): instance = {"name": "dummy"} self.mox.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup(self._session, instance['name'], False).AndReturn(None) self.mox.ReplayAll() self.assertRaises(exception.InstanceNotFound, self._vmops._get_vm_opaque_ref, instance) @mock.patch.object(vm_utils, 'destroy_vm') @mock.patch.object(vm_utils, 'clean_shutdown_vm') @mock.patch.object(vm_utils, 'hard_shutdown_vm') def test_clean_shutdown_no_bdm_on_destroy(self, hard_shutdown_vm, clean_shutdown_vm, destroy_vm): vm_ref = 'vm_ref' self._vmops._destroy(self.instance, vm_ref, destroy_disks=False) hard_shutdown_vm.assert_called_once_with(self._vmops._session, self.instance, vm_ref) self.assertEqual(0, clean_shutdown_vm.call_count) @mock.patch.object(vm_utils, 'destroy_vm') @mock.patch.object(vm_utils, 'clean_shutdown_vm') @mock.patch.object(vm_utils, 'hard_shutdown_vm') def test_clean_shutdown_with_bdm_on_destroy(self, hard_shutdown_vm, clean_shutdown_vm, destroy_vm): vm_ref = 'vm_ref' block_device_info = {'block_device_mapping': ['fake']} self._vmops._destroy(self.instance, vm_ref, destroy_disks=False, block_device_info=block_device_info) 
clean_shutdown_vm.assert_called_once_with(self._vmops._session, self.instance, vm_ref) self.assertEqual(0, hard_shutdown_vm.call_count) @mock.patch.object(vm_utils, 'destroy_vm') @mock.patch.object(vm_utils, 'clean_shutdown_vm', return_value=False) @mock.patch.object(vm_utils, 'hard_shutdown_vm') def test_clean_shutdown_with_bdm_failed_on_destroy(self, hard_shutdown_vm, clean_shutdown_vm, destroy_vm): vm_ref = 'vm_ref' block_device_info = {'block_device_mapping': ['fake']} self._vmops._destroy(self.instance, vm_ref, destroy_disks=False, block_device_info=block_device_info) clean_shutdown_vm.assert_called_once_with(self._vmops._session, self.instance, vm_ref) hard_shutdown_vm.assert_called_once_with(self._vmops._session, self.instance, vm_ref) @mock.patch.object(vm_utils, 'try_auto_configure_disk') @mock.patch.object(vm_utils, 'create_vbd', side_effect=test.TestingException) def test_attach_disks_rescue_auto_disk_config_false(self, create_vbd, try_auto_config): ctxt = context.RequestContext('user', 'project') instance = fake_instance.fake_instance_obj(ctxt) image_meta = objects.ImageMeta.from_dict( {'properties': {'auto_disk_config': 'false'}}) vdis = {'root': {'ref': 'fake-ref'}} self.assertRaises(test.TestingException, self._vmops._attach_disks, instance, image_meta=image_meta, vm_ref=None, name_label=None, vdis=vdis, disk_image_type='fake', network_info=[], rescue=True) self.assertFalse(try_auto_config.called) @mock.patch.object(vm_utils, 'try_auto_configure_disk') @mock.patch.object(vm_utils, 'create_vbd', side_effect=test.TestingException) def test_attach_disks_rescue_auto_disk_config_true(self, create_vbd, try_auto_config): ctxt = context.RequestContext('user', 'project') instance = fake_instance.fake_instance_obj(ctxt) image_meta = objects.ImageMeta.from_dict( {'properties': {'auto_disk_config': 'true'}}) vdis = {'root': {'ref': 'fake-ref'}} self.assertRaises(test.TestingException, self._vmops._attach_disks, instance, image_meta=image_meta, vm_ref=None, name_label=None, vdis=vdis, disk_image_type='fake', network_info=[], rescue=True) try_auto_config.assert_called_once_with(self._vmops._session, 'fake-ref', instance.flavor.root_gb) class InjectAutoDiskConfigTestCase(VMOpsTestBase): def test_inject_auto_disk_config_when_present(self): vm, vm_ref = self.create_vm("dummy") instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True} self.vmops._inject_auto_disk_config(instance, vm_ref) xenstore_data = vm['xenstore_data'] self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True') def test_inject_auto_disk_config_none_as_false(self): vm, vm_ref = self.create_vm("dummy") instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None} self.vmops._inject_auto_disk_config(instance, vm_ref) xenstore_data = vm['xenstore_data'] self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False') class GetConsoleOutputTestCase(VMOpsTestBase): def test_get_console_output_works(self): ctxt = context.RequestContext('user', 'project') instance = fake_instance.fake_instance_obj(ctxt) self.mox.StubOutWithMock(self.vmops, '_get_last_dom_id') self.vmops._get_last_dom_id(instance, check_rescue=True).AndReturn(42) self.mox.ReplayAll() self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance)) def test_get_console_output_not_available(self): self.mox.StubOutWithMock(self.vmops, '_get_last_dom_id') ctxt = context.RequestContext('user', 'project') instance = fake_instance.fake_instance_obj(ctxt) # dom_id=0 used to trigger exception in fake XenAPI 
self.vmops._get_last_dom_id(instance, check_rescue=True).AndReturn(0) self.mox.ReplayAll() self.assertRaises(exception.ConsoleNotAvailable, self.vmops.get_console_output, instance) def test_get_dom_id_works(self): instance = {"name": "dummy"} vm, vm_ref = self.create_vm("dummy") self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance)) def test_get_dom_id_works_with_rescue_vm(self): instance = {"name": "dummy"} vm, vm_ref = self.create_vm("dummy-rescue") self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance, check_rescue=True)) def test_get_dom_id_raises_not_found(self): instance = {"name": "dummy"} self.create_vm("not-dummy") self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance) def test_get_dom_id_works_with_vmref(self): vm, vm_ref = self.create_vm("dummy") self.assertEqual(vm["domid"], self.vmops._get_dom_id(vm_ref=vm_ref)) class SpawnTestCase(VMOpsTestBase): def _stub_out_common(self): self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique') self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem') self.mox.StubOutWithMock(self.vmops, '_update_instance_progress') self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type') self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance') self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis') self.mox.StubOutWithMock(self.vmops._volumeops, 'safe_cleanup_from_vdis') self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis') self.mox.StubOutWithMock(vm_utils, 'create_kernel_and_ramdisk') self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk') self.mox.StubOutWithMock(self.vmops, '_create_vm_record') self.mox.StubOutWithMock(self.vmops, '_destroy') self.mox.StubOutWithMock(self.vmops, '_attach_disks') self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs') self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci') self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks') self.mox.StubOutWithMock(self.vmops, 'inject_network_info') self.mox.StubOutWithMock(self.vmops, '_inject_hostname') self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata') self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config') self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings') self.mox.StubOutWithMock(self.vmops, '_create_vifs') self.mox.StubOutWithMock(self.vmops.firewall_driver, 'setup_basic_filtering') self.mox.StubOutWithMock(self.vmops.firewall_driver, 'prepare_instance_filter') self.mox.StubOutWithMock(self.vmops, '_start') self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start') self.mox.StubOutWithMock(self.vmops, '_configure_new_instance_with_agent') self.mox.StubOutWithMock(self.vmops, '_remove_hostname') self.mox.StubOutWithMock(self.vmops.firewall_driver, 'apply_instance_filter') self.mox.StubOutWithMock(self.vmops, '_update_last_dom_id') self.mox.StubOutWithMock(self.vmops._session, 'call_xenapi') def _test_spawn(self, name_label_param=None, block_device_info_param=None, rescue=False, include_root_vdi=True, throw_exception=None, attach_pci_dev=False, neutron_exception=False): self._stub_out_common() instance = {"name": "dummy", "uuid": "fake_uuid"} name_label = name_label_param if name_label is None: name_label = "dummy" image_meta = objects.ImageMeta.from_dict({"id": "image_id"}) context = "context" session = self.vmops._session injected_files = "fake_files" admin_password = "password" network_info = "net_info" steps = 10 if rescue: steps += 1 block_device_info = block_device_info_param if block_device_info and not block_device_info['root_device_name']: 
block_device_info = dict(block_device_info_param) block_device_info['root_device_name'] = \ self.vmops.default_root_dev di_type = "di_type" vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type) step = 1 self.vmops._update_instance_progress(context, instance, step, steps) vdis = {"other": {"ref": "fake_ref_2", "osvol": True}} if include_root_vdi: vdis["root"] = {"ref": "fake_ref"} self.vmops._get_vdis_for_instance(context, instance, name_label, image_meta, di_type, block_device_info).AndReturn(vdis) self.vmops._resize_up_vdis(instance, vdis) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) kernel_file = "kernel" ramdisk_file = "ramdisk" vm_utils.create_kernel_and_ramdisk(context, session, instance, name_label).AndReturn((kernel_file, ramdisk_file)) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) vm_ref = "fake_vm_ref" self.vmops._ensure_instance_name_unique(name_label) self.vmops._ensure_enough_free_mem(instance) self.vmops._create_vm_record(context, instance, name_label, di_type, kernel_file, ramdisk_file, image_meta, rescue).AndReturn(vm_ref) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) self.vmops._attach_disks(instance, image_meta, vm_ref, name_label, vdis, di_type, network_info, rescue, admin_password, injected_files) if attach_pci_dev: fake_dev = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'id': 1, 'compute_node_id': 1, 'address': '00:00.0', 'vendor_id': '1234', 'product_id': 'abcd', 'dev_type': fields.PciDeviceType.STANDARD, 'status': 'available', 'dev_id': 'devid', 'label': 'label', 'instance_uuid': None, 'extra_info': '{}', } pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev]) vm_utils.set_other_config_pci(self.vmops._session, vm_ref, "0/0000:00:00.0") else: pci_manager.get_instance_pci_devs(instance).AndReturn([]) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) self.vmops._inject_instance_metadata(instance, vm_ref) self.vmops._inject_auto_disk_config(instance, vm_ref) self.vmops._inject_hostname(instance, vm_ref, rescue) self.vmops._file_inject_vm_settings(instance, vm_ref, vdis, network_info) self.vmops.inject_network_info(instance, network_info, vm_ref) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) if neutron_exception: events = [('network-vif-plugged', 1)] self.vmops._get_neutron_events(network_info, True, True).AndReturn(events) self.mox.StubOutWithMock(self.vmops, '_neutron_failed_callback') self.mox.StubOutWithMock(self.vmops._virtapi, 'wait_for_instance_event') self.vmops._virtapi.wait_for_instance_event(instance, events, deadline=300, error_callback=self.vmops._neutron_failed_callback).\ AndRaise(exception.VirtualInterfaceCreateException) else: self.vmops._create_vifs(instance, vm_ref, network_info) self.vmops.firewall_driver.setup_basic_filtering(instance, network_info).AndRaise(NotImplementedError) self.vmops.firewall_driver.prepare_instance_filter(instance, network_info) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) if rescue: self.vmops._attach_orig_disks(instance, vm_ref) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) start_pause = True self.vmops._start(instance, vm_ref, start_pause=start_pause) step += 1 self.vmops._update_instance_progress(context, instance, step, steps) self.vmops.firewall_driver.apply_instance_filter(instance, network_info) step += 1 
self.vmops._update_instance_progress(context, instance, step, steps) self.vmops._session.call_xenapi('VM.unpause', vm_ref) self.vmops._wait_for_instance_to_start(instance, vm_ref) self.vmops._update_last_dom_id(vm_ref) self.vmops._configure_new_instance_with_agent(instance, vm_ref, injected_files, admin_password) self.vmops._remove_hostname(instance, vm_ref) step += 1 last_call = self.vmops._update_instance_progress(context, instance, step, steps) if throw_exception: last_call.AndRaise(throw_exception) if throw_exception or neutron_exception: self.vmops._destroy(instance, vm_ref, network_info=network_info) vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance, kernel_file, ramdisk_file) vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"]) self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"]) self.mox.ReplayAll() self.vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info_param, name_label_param, rescue) def test_spawn(self): self._test_spawn() def test_spawn_with_alternate_options(self): self._test_spawn(include_root_vdi=False, rescue=True, name_label_param="bob", block_device_info_param={"root_device_name": ""}) def test_spawn_with_pci_available_on_the_host(self): self._test_spawn(attach_pci_dev=True) def test_spawn_performs_rollback_and_throws_exception(self): self.assertRaises(test.TestingException, self._test_spawn, throw_exception=test.TestingException()) def test_spawn_with_neutron(self): self.mox.StubOutWithMock(self.vmops, '_get_neutron_events') events = [('network-vif-plugged', 1)] network_info = "net_info" self.vmops._get_neutron_events(network_info, True, True).AndReturn(events) self.mox.StubOutWithMock(self.vmops, '_neutron_failed_callback') self._test_spawn() def test_spawn_with_neutron_exception(self): self.mox.StubOutWithMock(self.vmops, '_get_neutron_events') self.assertRaises(exception.VirtualInterfaceCreateException, self._test_spawn, neutron_exception=True) def _test_finish_migration(self, power_on=True, resize_instance=True, throw_exception=None, booted_from_volume=False): self._stub_out_common() self.mox.StubOutWithMock(volumeops.VolumeOps, "connect_volume") self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks") self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices") context = "context" migration = {} name_label = "dummy" instance = {"name": name_label, "uuid": "fake_uuid", "root_device_name": "/dev/xvda"} disk_info = "disk_info" network_info = "net_info" image_meta = objects.ImageMeta.from_dict({"id": "image_id"}) block_device_info = {} import_root = True if booted_from_volume: block_device_info = {'block_device_mapping': [ {'mount_device': '/dev/xvda', 'connection_info': {'data': 'fake-data'}}]} import_root = False volumeops.VolumeOps.connect_volume( {'data': 'fake-data'}).AndReturn(('sr', 'vol-vdi-uuid')) self.vmops._session.call_xenapi('VDI.get_by_uuid', 'vol-vdi-uuid').AndReturn('vol-vdi-ref') session = self.vmops._session self.vmops._ensure_instance_name_unique(name_label) self.vmops._ensure_enough_free_mem(instance) di_type = "di_type" vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type) root_vdi = {"ref": "fake_ref"} ephemeral_vdi = {"ref": "fake_ref_e"} vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}} vm_utils.import_all_migrated_disks(self.vmops._session, instance, import_root=import_root).AndReturn(vdis) kernel_file = "kernel" ramdisk_file = "ramdisk" vm_utils.create_kernel_and_ramdisk(context, session, instance, 
name_label).AndReturn((kernel_file, ramdisk_file)) vm_ref = "fake_vm_ref" rescue = False self.vmops._create_vm_record(context, instance, name_label, di_type, kernel_file, ramdisk_file, image_meta, rescue).AndReturn(vm_ref) if resize_instance: self.vmops._resize_up_vdis(instance, vdis) self.vmops._attach_disks(instance, image_meta, vm_ref, name_label, vdis, di_type, network_info, False, None, None) self.vmops._attach_mapped_block_devices(instance, block_device_info) pci_manager.get_instance_pci_devs(instance).AndReturn([]) self.vmops._inject_instance_metadata(instance, vm_ref) self.vmops._inject_auto_disk_config(instance, vm_ref) self.vmops._file_inject_vm_settings(instance, vm_ref, vdis, network_info) self.vmops.inject_network_info(instance, network_info, vm_ref) self.vmops._create_vifs(instance, vm_ref, network_info) self.vmops.firewall_driver.setup_basic_filtering(instance, network_info).AndRaise(NotImplementedError) self.vmops.firewall_driver.prepare_instance_filter(instance, network_info) if power_on: self.vmops._start(instance, vm_ref, start_pause=True) self.vmops.firewall_driver.apply_instance_filter(instance, network_info) if power_on: self.vmops._session.call_xenapi('VM.unpause', vm_ref) self.vmops._wait_for_instance_to_start(instance, vm_ref) self.vmops._update_last_dom_id(vm_ref) last_call = self.vmops._update_instance_progress(context, instance, step=5, total_steps=5) if throw_exception: last_call.AndRaise(throw_exception) self.vmops._destroy(instance, vm_ref, network_info=network_info) vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance, kernel_file, ramdisk_file) vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref_e", "fake_ref"]) self.mox.ReplayAll() self.vmops.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info, power_on) def test_finish_migration(self): self._test_finish_migration() def test_finish_migration_no_power_on(self): self._test_finish_migration(power_on=False, resize_instance=False) def test_finish_migration_booted_from_volume(self): self._test_finish_migration(booted_from_volume=True) def test_finish_migrate_performs_rollback_on_error(self): self.assertRaises(test.TestingException, self._test_finish_migration, power_on=False, resize_instance=False, throw_exception=test.TestingException()) def test_remove_hostname(self): vm, vm_ref = self.create_vm("dummy") instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None} self.mox.StubOutWithMock(self._session, 'call_xenapi') self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref, "vm-data/hostname") self.mox.ReplayAll() self.vmops._remove_hostname(instance, vm_ref) self.mox.VerifyAll() def test_reset_network(self): class mock_agent(object): def __init__(self): self.called = False def resetnetwork(self): self.called = True vm, vm_ref = self.create_vm("dummy") instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None} agent = mock_agent() self.mox.StubOutWithMock(self.vmops, 'agent_enabled') self.mox.StubOutWithMock(self.vmops, '_get_agent') self.mox.StubOutWithMock(self.vmops, '_inject_hostname') self.mox.StubOutWithMock(self.vmops, '_remove_hostname') self.vmops.agent_enabled(instance).AndReturn(True) self.vmops._get_agent(instance, vm_ref).AndReturn(agent) self.vmops._inject_hostname(instance, vm_ref, False) self.vmops._remove_hostname(instance, vm_ref) self.mox.ReplayAll() self.vmops.reset_network(instance) self.assertTrue(agent.called) self.mox.VerifyAll() def test_inject_hostname(self): instance = 
{"hostname": "dummy", "os_type": "fake", "uuid": "uuid"} vm_ref = "vm_ref" self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore') self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy') self.mox.ReplayAll() self.vmops._inject_hostname(instance, vm_ref, rescue=False) def test_inject_hostname_with_rescue_prefix(self): instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"} vm_ref = "vm_ref" self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore') self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'RESCUE-dummy') self.mox.ReplayAll() self.vmops._inject_hostname(instance, vm_ref, rescue=True) def test_inject_hostname_with_windows_name_truncation(self): instance = {"hostname": "dummydummydummydummydummy", "os_type": "windows", "uuid": "uuid"} vm_ref = "vm_ref" self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore') self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'RESCUE-dummydum') self.mox.ReplayAll() self.vmops._inject_hostname(instance, vm_ref, rescue=True) def test_wait_for_instance_to_start(self): instance = {"uuid": "uuid"} vm_ref = "vm_ref" self.mox.StubOutWithMock(vm_utils, 'get_power_state') self.mox.StubOutWithMock(greenthread, 'sleep') vm_utils.get_power_state(self._session, vm_ref).AndReturn( power_state.SHUTDOWN) greenthread.sleep(0.5) vm_utils.get_power_state(self._session, vm_ref).AndReturn( power_state.RUNNING) self.mox.ReplayAll() self.vmops._wait_for_instance_to_start(instance, vm_ref) def test_attach_orig_disks(self): instance = {"name": "dummy"} vm_ref = "vm_ref" vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"} self.mox.StubOutWithMock(vm_utils, 'lookup') self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs') self.mox.StubOutWithMock(vm_utils, 'create_vbd') vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref") self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn( vbd_refs) vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref", vmops.DEVICE_RESCUE, bootable=False) self.mox.ReplayAll() self.vmops._attach_orig_disks(instance, vm_ref) def test_agent_update_setup(self): # agent updates need to occur after networking is configured instance = {'name': 'betelgeuse', 'uuid': '1-2-3-4-5-6'} vm_ref = 'vm_ref' agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session, self.vmops._virtapi, instance, vm_ref) self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent') self.mox.StubOutWithMock(self.vmops, '_get_agent') self.mox.StubOutWithMock(agent, 'get_version') self.mox.StubOutWithMock(agent, 'resetnetwork') self.mox.StubOutWithMock(agent, 'update_if_needed') xenapi_agent.should_use_agent(instance).AndReturn(True) self.vmops._get_agent(instance, vm_ref).AndReturn(agent) agent.get_version().AndReturn('1.2.3') agent.resetnetwork() agent.update_if_needed('1.2.3') self.mox.ReplayAll() self.vmops._configure_new_instance_with_agent(instance, vm_ref, None, None) @mock.patch.object(utils, 'is_neutron', return_value=True) def test_get_neutron_event(self, mock_is_neutron): network_info = [{"active": False, "id": 1}, {"active": True, "id": 2}, {"active": False, "id": 3}, {"id": 4}] power_on = True first_boot = True events = self.vmops._get_neutron_events(network_info, power_on, first_boot) self.assertEqual("network-vif-plugged", events[0][0]) self.assertEqual(1, events[0][1]) self.assertEqual("network-vif-plugged", events[1][0]) self.assertEqual(3, events[1][1]) @mock.patch.object(utils, 'is_neutron', return_value=False) def test_get_neutron_event_not_neutron_network(self, mock_is_neutron): network_info = 
[{"active": False, "id": 1}, {"active": True, "id": 2}, {"active": False, "id": 3}, {"id": 4}] power_on = True first_boot = True events = self.vmops._get_neutron_events(network_info, power_on, first_boot) self.assertEqual([], events) @mock.patch.object(utils, 'is_neutron', return_value=True) def test_get_neutron_event_power_off(self, mock_is_neutron): network_info = [{"active": False, "id": 1}, {"active": True, "id": 2}, {"active": False, "id": 3}, {"id": 4}] power_on = False first_boot = True events = self.vmops._get_neutron_events(network_info, power_on, first_boot) self.assertEqual([], events) @mock.patch.object(utils, 'is_neutron', return_value=True) def test_get_neutron_event_not_first_boot(self, mock_is_neutron): network_info = [{"active": False, "id": 1}, {"active": True, "id": 2}, {"active": False, "id": 3}, {"id": 4}] power_on = True first_boot = False events = self.vmops._get_neutron_events(network_info, power_on, first_boot) self.assertEqual([], events) class DestroyTestCase(VMOpsTestBase): def setUp(self): super(DestroyTestCase, self).setUp() self.context = context.RequestContext(user_id=None, project_id=None) self.instance = fake_instance.fake_instance_obj(self.context) @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) @mock.patch.object(vm_utils, 'hard_shutdown_vm') @mock.patch.object(volume_utils, 'find_sr_by_uuid') @mock.patch.object(volume_utils, 'forget_sr') def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm, lookup): self.vmops.destroy(self.instance, 'network_info', {'block_device_mapping': []}) self.assertEqual(0, find_sr_by_uuid.call_count) self.assertEqual(0, forget_sr.call_count) self.assertEqual(0, hard_shutdown_vm.call_count) @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) @mock.patch.object(vm_utils, 'hard_shutdown_vm') @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None) @mock.patch.object(volume_utils, 'forget_sr') def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm, lookup): self.vmops.destroy(self.instance, 'network_info', {'block_device_mapping': [{'connection_info': {'data': {'volume_id': 'fake-uuid'}}}]}) find_sr_by_uuid.assert_called_once_with(self.vmops._session, 'FA15E-D15C-fake-uuid') self.assertEqual(0, forget_sr.call_count) self.assertEqual(0, hard_shutdown_vm.call_count) @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) @mock.patch.object(vm_utils, 'hard_shutdown_vm') @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref') @mock.patch.object(volume_utils, 'forget_sr') def test_no_vm_orphaned_volume_old_sr(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm, lookup): self.vmops.destroy(self.instance, 'network_info', {'block_device_mapping': [{'connection_info': {'data': {'volume_id': 'fake-uuid'}}}]}) find_sr_by_uuid.assert_called_once_with(self.vmops._session, 'FA15E-D15C-fake-uuid') forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref') self.assertEqual(0, hard_shutdown_vm.call_count) @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) @mock.patch.object(vm_utils, 'hard_shutdown_vm') @mock.patch.object(volume_utils, 'find_sr_by_uuid', side_effect=[None, 'sr_ref']) @mock.patch.object(volume_utils, 'forget_sr') @mock.patch.object(uuid, 'uuid5', return_value='fake-uuid') def test_no_vm_orphaned_volume(self, uuid5, forget_sr, find_sr_by_uuid, hard_shutdown_vm, lookup): fake_data = {'volume_id': 'fake-uuid', 'target_portal': 'host:port', 'target_iqn': 'iqn'} self.vmops.destroy(self.instance, 
'network_info', {'block_device_mapping': [{'connection_info': {'data': fake_data}}]}) call1 = mock.call(self.vmops._session, 'FA15E-D15C-fake-uuid') call2 = mock.call(self.vmops._session, 'fake-uuid') uuid5.assert_called_once_with(volume_utils.SR_NAMESPACE, 'host/port/iqn') find_sr_by_uuid.assert_has_calls([call1, call2]) forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref') self.assertEqual(0, hard_shutdown_vm.call_count) @mock.patch.object(vmops.VMOps, '_update_instance_progress') @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') @mock.patch.object(vm_utils, 'get_sr_path') @mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm') @mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down') @mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up') class MigrateDiskAndPowerOffTestCase(VMOpsTestBase): def setUp(self): super(MigrateDiskAndPowerOffTestCase, self).setUp() self.context = context.RequestContext('user', 'project') def test_migrate_disk_and_power_off_works_down(self, migrate_up, migrate_down, *mocks): instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"} flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=1, ephemeral_gb=0) self.vmops.migrate_disk_and_power_off(None, instance, None, flavor, None) self.assertFalse(migrate_up.called) self.assertTrue(migrate_down.called) def test_migrate_disk_and_power_off_works_up(self, migrate_up, migrate_down, *mocks): instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"} flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=2, ephemeral_gb=2) self.vmops.migrate_disk_and_power_off(None, instance, None, flavor, None) self.assertFalse(migrate_down.called) self.assertTrue(migrate_up.called) def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self, migrate_up, migrate_down, *mocks): instance = {"ephemeral_gb": 2} flavor = fake_flavor.fake_flavor_obj(self.context, ephemeral_gb=1) self.assertRaises(exception.ResizeError, self.vmops.migrate_disk_and_power_off, None, instance, None, flavor, None) @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') @mock.patch.object(vm_utils, 'migrate_vhd') @mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown') @mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm') @mock.patch.object(vmops.VMOps, '_update_instance_progress') @mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label') class MigrateDiskResizingUpTestCase(VMOpsTestBase): def _fake_snapshot_attached_here(self, session, instance, vm_ref, label, userdevice, post_snapshot_callback): self.assertIsInstance(instance, dict) if userdevice == '0': self.assertEqual("vm_ref", vm_ref) self.assertEqual("fake-snapshot", label) yield ["leaf", "parent", "grandp"] else: leaf = userdevice + "-leaf" parent = userdevice + "-parent" yield [leaf, parent] @mock.patch.object(volume_utils, 'is_booted_from_volume', return_value=False) def test_migrate_disk_resizing_up_works_no_ephemeral(self, mock_is_booted_from_volume, mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids, mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm): context = "ctxt" instance = {"name": "fake", "uuid": "uuid"} dest = "dest" vm_ref = "vm_ref" sr_path = "sr_path" mock_get_all_vdi_uuids.return_value = None mock_get_vdi_for_vm.return_value = ({}, {"uuid": "root"}) with mock.patch.object(vm_utils, '_snapshot_attached_here_impl', self._fake_snapshot_attached_here): self.vmops._migrate_disk_resizing_up(context, instance, dest, vm_ref, sr_path) mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session, vm_ref, 
min_userdevice=4) mock_apply_orig.assert_called_once_with(instance, vm_ref) mock_shutdown.assert_called_once_with(instance, vm_ref) m_vhd_expected = [mock.call(self.vmops._session, instance, "parent", dest, sr_path, 1), mock.call(self.vmops._session, instance, "grandp", dest, sr_path, 2), mock.call(self.vmops._session, instance, "root", dest, sr_path, 0)] self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list) prog_expected = [ mock.call(context, instance, 1, 5), mock.call(context, instance, 2, 5), mock.call(context, instance, 3, 5), mock.call(context, instance, 4, 5) # 5/5: step to be executed by finish migration. ] self.assertEqual(prog_expected, mock_update_progress.call_args_list) @mock.patch.object(volume_utils, 'is_booted_from_volume', return_value=False) def test_migrate_disk_resizing_up_works_with_two_ephemeral(self, mock_is_booted_from_volume, mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids, mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm): context = "ctxt" instance = {"name": "fake", "uuid": "uuid"} dest = "dest" vm_ref = "vm_ref" sr_path = "sr_path" mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"] mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "root"}), ({}, {"uuid": "4-root"}), ({}, {"uuid": "5-root"})] with mock.patch.object(vm_utils, '_snapshot_attached_here_impl', self._fake_snapshot_attached_here): self.vmops._migrate_disk_resizing_up(context, instance, dest, vm_ref, sr_path) mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session, vm_ref, min_userdevice=4) mock_apply_orig.assert_called_once_with(instance, vm_ref) mock_shutdown.assert_called_once_with(instance, vm_ref) m_vhd_expected = [mock.call(self.vmops._session, instance, "parent", dest, sr_path, 1), mock.call(self.vmops._session, instance, "grandp", dest, sr_path, 2), mock.call(self.vmops._session, instance, "4-parent", dest, sr_path, 1, 1), mock.call(self.vmops._session, instance, "5-parent", dest, sr_path, 1, 2), mock.call(self.vmops._session, instance, "root", dest, sr_path, 0), mock.call(self.vmops._session, instance, "4-root", dest, sr_path, 0, 1), mock.call(self.vmops._session, instance, "5-root", dest, sr_path, 0, 2)] self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list) prog_expected = [ mock.call(context, instance, 1, 5), mock.call(context, instance, 2, 5), mock.call(context, instance, 3, 5), mock.call(context, instance, 4, 5) # 5/5: step to be executed by finish migration. 
] self.assertEqual(prog_expected, mock_update_progress.call_args_list) @mock.patch.object(volume_utils, 'is_booted_from_volume', return_value=True) def test_migrate_disk_resizing_up_booted_from_volume(self, mock_is_booted_from_volume, mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids, mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm): context = "ctxt" instance = {"name": "fake", "uuid": "uuid"} dest = "dest" vm_ref = "vm_ref" sr_path = "sr_path" mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"] mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "4-root"}), ({}, {"uuid": "5-root"})] with mock.patch.object(vm_utils, '_snapshot_attached_here_impl', self._fake_snapshot_attached_here): self.vmops._migrate_disk_resizing_up(context, instance, dest, vm_ref, sr_path) mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session, vm_ref, min_userdevice=4) mock_apply_orig.assert_called_once_with(instance, vm_ref) mock_shutdown.assert_called_once_with(instance, vm_ref) m_vhd_expected = [mock.call(self.vmops._session, instance, "4-parent", dest, sr_path, 1, 1), mock.call(self.vmops._session, instance, "5-parent", dest, sr_path, 1, 2), mock.call(self.vmops._session, instance, "4-root", dest, sr_path, 0, 1), mock.call(self.vmops._session, instance, "5-root", dest, sr_path, 0, 2)] self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list) prog_expected = [ mock.call(context, instance, 1, 5), mock.call(context, instance, 2, 5), mock.call(context, instance, 3, 5), mock.call(context, instance, 4, 5) # 5/5: step to be executed by finish migration. ] self.assertEqual(prog_expected, mock_update_progress.call_args_list) @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan') @mock.patch.object(volume_utils, 'is_booted_from_volume', return_value=False) def test_migrate_disk_resizing_up_rollback(self, mock_is_booted_from_volume, mock_restore, mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids, mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm): context = "ctxt" instance = {"name": "fake", "uuid": "fake"} dest = "dest" vm_ref = "vm_ref" sr_path = "sr_path" mock_migrate_vhd.side_effect = test.TestingException mock_restore.side_effect = test.TestingException with mock.patch.object(vm_utils, '_snapshot_attached_here_impl', self._fake_snapshot_attached_here): self.assertRaises(exception.InstanceFaultRollback, self.vmops._migrate_disk_resizing_up, context, instance, dest, vm_ref, sr_path) mock_apply_orig.assert_called_once_with(instance, vm_ref) mock_restore.assert_called_once_with(instance) mock_migrate_vhd.assert_called_once_with(self.vmops._session, instance, "parent", dest, sr_path, 1) class CreateVMRecordTestCase(VMOpsTestBase): @mock.patch.object(vm_utils, 'determine_vm_mode') @mock.patch.object(vm_utils, 'get_vm_device_id') @mock.patch.object(vm_utils, 'create_vm') def test_create_vm_record_with_vm_device_id(self, mock_create_vm, mock_get_vm_device_id, mock_determine_vm_mode): context = "context" instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123") name_label = "dummy" disk_image_type = "vhd" kernel_file = "kernel" ramdisk_file = "ram" device_id = "0002" image_properties = {"xenapi_device_id": device_id} image_meta = objects.ImageMeta.from_dict( {"properties": image_properties}) rescue = False session = "session" self.vmops._session = session mock_get_vm_device_id.return_value = device_id mock_determine_vm_mode.return_value = "vm_mode" self.vmops._create_vm_record(context, instance, name_label, disk_image_type, kernel_file, 
ramdisk_file, image_meta, rescue) mock_get_vm_device_id.assert_called_with(session, image_meta) mock_create_vm.assert_called_with(session, instance, name_label, kernel_file, ramdisk_file, False, device_id) class BootableTestCase(VMOpsTestBase): def setUp(self): super(BootableTestCase, self).setUp() self.instance = {"name": "test", "uuid": "fake"} vm_rec, self.vm_ref = self.create_vm('test') # sanity check bootlock is initially disabled: self.assertEqual({}, vm_rec['blocked_operations']) def _get_blocked(self): vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref) return vm_rec['blocked_operations'] def test_acquire_bootlock(self): self.vmops._acquire_bootlock(self.vm_ref) blocked = self._get_blocked() self.assertIn('start', blocked) def test_release_bootlock(self): self.vmops._acquire_bootlock(self.vm_ref) self.vmops._release_bootlock(self.vm_ref) blocked = self._get_blocked() self.assertNotIn('start', blocked) def test_set_bootable(self): self.vmops.set_bootable(self.instance, True) blocked = self._get_blocked() self.assertNotIn('start', blocked) def test_set_not_bootable(self): self.vmops.set_bootable(self.instance, False) blocked = self._get_blocked() self.assertIn('start', blocked) @mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True) class ResizeVdisTestCase(VMOpsTestBase): def test_dont_resize_root_volumes_osvol_false(self, mock_resize): instance = fake_instance.fake_db_instance(root_gb=20) vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}} self.vmops._resize_up_vdis(instance, vdis) self.assertTrue(mock_resize.called) def test_dont_resize_root_volumes_osvol_true(self, mock_resize): instance = fake_instance.fake_db_instance(root_gb=20) vdis = {'root': {'osvol': True}} self.vmops._resize_up_vdis(instance, vdis) self.assertFalse(mock_resize.called) def test_dont_resize_root_volumes_no_osvol(self, mock_resize): instance = fake_instance.fake_db_instance(root_gb=20) vdis = {'root': {}} self.vmops._resize_up_vdis(instance, vdis) self.assertFalse(mock_resize.called) @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes, mock_resize): mock_sizes.return_value = [2000, 1000] instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20) ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}} vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'}, 'ephemerals': ephemerals} with mock.patch.object(vm_utils, 'generate_single_ephemeral', autospec=True) as g: self.vmops._resize_up_vdis(instance, vdis) self.assertEqual([mock.call(self.vmops._session, instance, 4, 2000), mock.call(self.vmops._session, instance, 5, 1000)], mock_resize.call_args_list) self.assertFalse(g.called) def test_resize_up_vdis_root(self, mock_resize): instance = {"root_gb": 20, "ephemeral_gb": 0} self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}}) mock_resize.assert_called_once_with(self.vmops._session, instance, "vdi_ref", 20) def test_resize_up_vdis_zero_disks(self, mock_resize): instance = {"root_gb": 0, "ephemeral_gb": 0} self.vmops._resize_up_vdis(instance, {"root": {}}) self.assertFalse(mock_resize.called) def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize): instance = {"root_gb": 0, "ephemeral_gb": 3000} vdis = {} self.vmops._resize_up_vdis(instance, vdis) self.assertFalse(mock_resize.called) @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize): mock_sizes.return_value = [2000, 1000] instance = {"root_gb": 0, "ephemeral_gb": 3000} 
ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}} vdis = {"ephemerals": ephemerals} self.vmops._resize_up_vdis(instance, vdis) mock_sizes.assert_called_once_with(3000) expected = [mock.call(self.vmops._session, instance, 4, 2000), mock.call(self.vmops._session, instance, 5, 1000)] self.assertEqual(expected, mock_resize.call_args_list) @mock.patch.object(vm_utils, 'generate_single_ephemeral') @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes, mock_generate, mock_resize): mock_sizes.return_value = [2000, 1000] instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"} ephemerals = {"4": {"ref": 4}} vdis = {"ephemerals": ephemerals} self.vmops._resize_up_vdis(instance, vdis) mock_sizes.assert_called_once_with(3000) mock_resize.assert_called_once_with(self.vmops._session, instance, 4, 2000) mock_generate.assert_called_once_with(self.vmops._session, instance, None, 5, 1000) @mock.patch.object(vm_utils, 'remove_old_snapshots') class CleanupFailedSnapshotTestCase(VMOpsTestBase): def test_post_interrupted_snapshot_cleanup(self, mock_remove): self.vmops._get_vm_opaque_ref = mock.Mock() self.vmops._get_vm_opaque_ref.return_value = "vm_ref" self.vmops.post_interrupted_snapshot_cleanup("context", "instance") mock_remove.assert_called_once_with(self.vmops._session, "instance", "vm_ref") class XenstoreCallsTestCase(VMOpsTestBase): """Test cases for Read/Write/Delete/Update xenstore calls from vmops. """ @mock.patch.object(vmops.VMOps, '_make_plugin_call') def test_read_from_xenstore(self, fake_xapi_call): fake_xapi_call.return_value = "fake_xapi_return" fake_instance = {"name": "fake_instance"} path = "attr/PVAddons/MajorVersion" self.assertEqual("fake_xapi_return", self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref")) @mock.patch.object(vmops.VMOps, '_make_plugin_call') def test_read_from_xenstore_ignore_missing_path(self, fake_xapi_call): fake_instance = {"name": "fake_instance"} path = "attr/PVAddons/MajorVersion" self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref") fake_xapi_call.assert_called_once_with('xenstore.py', 'read_record', fake_instance, vm_ref="vm_ref", path=path, ignore_missing_path='True') @mock.patch.object(vmops.VMOps, '_make_plugin_call') def test_read_from_xenstore_missing_path(self, fake_xapi_call): fake_instance = {"name": "fake_instance"} path = "attr/PVAddons/MajorVersion" self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref", ignore_missing_path=False) fake_xapi_call.assert_called_once_with('xenstore.py', 'read_record', fake_instance, vm_ref="vm_ref", path=path, ignore_missing_path='False') class LiveMigrateTestCase(VMOpsTestBase): @mock.patch.object(vmops.VMOps, '_ensure_host_in_aggregate') def _test_check_can_live_migrate_destination_shared_storage( self, shared, mock_ensure_host): fake_instance = {"name": "fake_instance", "host": "fake_host"} block_migration = None disk_over_commit = False ctxt = 'ctxt' with mock.patch.object(self._session, 'get_rec') as fake_sr_rec: fake_sr_rec.return_value = {'shared': shared} migrate_data_ret = self.vmops.check_can_live_migrate_destination( ctxt, fake_instance, block_migration, disk_over_commit) if shared: self.assertFalse(migrate_data_ret.block_migration) else: self.assertTrue(migrate_data_ret.block_migration) def test_check_can_live_migrate_destination_shared_storage(self): self._test_check_can_live_migrate_destination_shared_storage(True) def test_check_can_live_migrate_destination_shared_storage_false(self): 
self._test_check_can_live_migrate_destination_shared_storage(False) @mock.patch.object(vmops.VMOps, '_ensure_host_in_aggregate', side_effect=exception.MigrationPreCheckError(reason="")) def test_check_can_live_migrate_destination_block_migration( self, mock_ensure_host): fake_instance = {"name": "fake_instance", "host": "fake_host"} block_migration = None disk_over_commit = False ctxt = 'ctxt' migrate_data_ret = self.vmops.check_can_live_migrate_destination( ctxt, fake_instance, block_migration, disk_over_commit) self.assertTrue(migrate_data_ret.block_migration) self.assertEqual(vm_utils.safe_find_sr(self._session), migrate_data_ret.destination_sr_ref) self.assertEqual({'value': 'fake_migrate_data'}, migrate_data_ret.migrate_send_data) @mock.patch.object(vmops.objects.AggregateList, 'get_by_host') def test_get_host_uuid_from_aggregate_no_aggr(self, mock_get_by_host): mock_get_by_host.return_value = objects.AggregateList(objects=[]) context = "ctx" hostname = "other_host" self.assertRaises(exception.MigrationPreCheckError, self.vmops._get_host_uuid_from_aggregate, context, hostname) @mock.patch.object(vmops.objects.AggregateList, 'get_by_host') def test_get_host_uuid_from_aggregate_bad_aggr(self, mock_get_by_host): context = "ctx" hostname = "other_host" fake_aggregate_obj = objects.Aggregate(hosts=['fake'], metadata={'this': 'that'}) fake_aggr_list = objects.AggregateList(objects=[fake_aggregate_obj]) mock_get_by_host.return_value = fake_aggr_list self.assertRaises(exception.MigrationPreCheckError, self.vmops._get_host_uuid_from_aggregate, context, hostname) @mock.patch.object(vmops.VMOps, 'connect_block_device_volumes') def test_pre_live_migration(self, mock_connect): migrate_data = objects.XenapiLiveMigrateData() migrate_data.block_migration = True sr_uuid_map = {"sr_uuid": "sr_ref"} mock_connect.return_value = {"sr_uuid": "sr_ref"} result = self.vmops.pre_live_migration( None, None, "bdi", None, None, migrate_data) self.assertTrue(result.block_migration) self.assertEqual(result.sr_uuid_map, sr_uuid_map) mock_connect.assert_called_once_with("bdi") def test_pre_live_migration_raises_with_no_data(self): self.assertRaises(exception.InvalidParameterValue, self.vmops.pre_live_migration, None, None, "bdi", None, None, None) class LiveMigrateFakeVersionTestCase(VMOpsTestBase): @mock.patch.object(vmops.VMOps, '_pv_device_reported') @mock.patch.object(vmops.VMOps, '_pv_driver_version_reported') @mock.patch.object(vmops.VMOps, '_write_fake_pv_version') def test_ensure_pv_driver_info_for_live_migration( self, mock_write_fake_pv_version, mock_pv_driver_version_reported, mock_pv_device_reported): mock_pv_device_reported.return_value = True mock_pv_driver_version_reported.return_value = False fake_instance = {"name": "fake_instance"} self.vmops._ensure_pv_driver_info_for_live_migration(fake_instance, "vm_rec") mock_write_fake_pv_version.assert_called_once_with(fake_instance, "vm_rec") @mock.patch.object(vmops.VMOps, '_read_from_xenstore') def test_pv_driver_version_reported_None(self, fake_read_from_xenstore): fake_read_from_xenstore.return_value = '"None"' fake_instance = {"name": "fake_instance"} self.assertFalse(self.vmops._pv_driver_version_reported(fake_instance, "vm_ref")) @mock.patch.object(vmops.VMOps, '_read_from_xenstore') def test_pv_driver_version_reported(self, fake_read_from_xenstore): fake_read_from_xenstore.return_value = '6.2.0' fake_instance = {"name": "fake_instance"} self.assertTrue(self.vmops._pv_driver_version_reported(fake_instance, "vm_ref")) @mock.patch.object(vmops.VMOps, 
'_read_from_xenstore') def test_pv_device_reported(self, fake_read_from_xenstore): with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec: fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'} with mock.patch.object(self._session, 'call_xenapi') as fake_call: fake_call.return_value = {'device': '0'} fake_read_from_xenstore.return_value = '4' fake_instance = {"name": "fake_instance"} self.assertTrue(self.vmops._pv_device_reported(fake_instance, "vm_ref")) @mock.patch.object(vmops.VMOps, '_read_from_xenstore') def test_pv_device_not_reported(self, fake_read_from_xenstore): with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec: fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'} with mock.patch.object(self._session, 'call_xenapi') as fake_call: fake_call.return_value = {'device': '0'} fake_read_from_xenstore.return_value = '0' fake_instance = {"name": "fake_instance"} self.assertFalse(self.vmops._pv_device_reported(fake_instance, "vm_ref")) @mock.patch.object(vmops.VMOps, '_read_from_xenstore') def test_pv_device_None_reported(self, fake_read_from_xenstore): with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec: fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'} with mock.patch.object(self._session, 'call_xenapi') as fake_call: fake_call.return_value = {'device': '0'} fake_read_from_xenstore.return_value = '"None"' fake_instance = {"name": "fake_instance"} self.assertFalse(self.vmops._pv_device_reported(fake_instance, "vm_ref")) @mock.patch.object(vmops.VMOps, '_write_to_xenstore') def test_write_fake_pv_version(self, fake_write_to_xenstore): fake_write_to_xenstore.return_value = 'fake_return' fake_instance = {"name": "fake_instance"} with mock.patch.object(self._session, 'product_version') as version: version.return_value = ('6', '2', '0') self.assertIsNone(self.vmops._write_fake_pv_version(fake_instance, "vm_ref")) class LiveMigrateHelperTestCase(VMOpsTestBase): def test_connect_block_device_volumes_none(self): self.assertEqual({}, self.vmops.connect_block_device_volumes(None)) @mock.patch.object(volumeops.VolumeOps, "connect_volume") def test_connect_block_device_volumes_calls_connect(self, mock_connect): with mock.patch.object(self.vmops._session, "call_xenapi") as mock_session: mock_connect.return_value = ("sr_uuid", None) mock_session.return_value = "sr_ref" bdm = {"connection_info": "c_info"} bdi = {"block_device_mapping": [bdm]} result = self.vmops.connect_block_device_volumes(bdi) self.assertEqual({'sr_uuid': 'sr_ref'}, result) mock_connect.assert_called_once_with("c_info") mock_session.assert_called_once_with("SR.get_by_uuid", "sr_uuid") def _call_live_migrate_command_with_migrate_send_data(self, migrate_data): command_name = 'test_command' vm_ref = "vm_ref" def side_effect(method, *args): if method == "SR.get_by_uuid": return "sr_ref_new" xmlrpclib.dumps(args, method, allow_none=1) with mock.patch.object(self.vmops, "_generate_vdi_map") as mock_gen_vdi_map, \ mock.patch.object(self.vmops._session, 'call_xenapi') as mock_call_xenapi: mock_call_xenapi.side_effect = side_effect mock_gen_vdi_map.side_effect = [ {"vdi": "sr_ref"}, {"vdi": "sr_ref_2"}] self.vmops._call_live_migrate_command(command_name, vm_ref, migrate_data) expected_vdi_map = {'vdi': 'sr_ref'} if 'sr_uuid_map' in migrate_data: expected_vdi_map = {'vdi': 'sr_ref_2'} self.assertEqual(mock_call_xenapi.call_args_list[-1], mock.call('test_command', vm_ref, migrate_data.migrate_send_data, True, expected_vdi_map, {}, {})) 
self.assertEqual(mock_gen_vdi_map.call_args_list[0], mock.call(migrate_data.destination_sr_ref, vm_ref)) if 'sr_uuid_map' in migrate_data: self.assertEqual(mock_gen_vdi_map.call_args_list[1], mock.call(migrate_data.sr_uuid_map["sr_uuid2"], vm_ref, "sr_ref_new")) def test_call_live_migrate_command_with_full_data(self): migrate_data = objects.XenapiLiveMigrateData() migrate_data.migrate_send_data = {"foo": "bar"} migrate_data.destination_sr_ref = "sr_ref" migrate_data.sr_uuid_map = {"sr_uuid2": "sr_ref_3"} self._call_live_migrate_command_with_migrate_send_data(migrate_data) def test_call_live_migrate_command_with_no_sr_uuid_map(self): migrate_data = objects.XenapiLiveMigrateData() migrate_data.migrate_send_data = {"foo": "baz"} migrate_data.destination_sr_ref = "sr_ref" self._call_live_migrate_command_with_migrate_send_data(migrate_data) def test_call_live_migrate_command_with_no_migrate_send_data(self): migrate_data = objects.XenapiLiveMigrateData() self.assertRaises(exception.InvalidParameterValue, self._call_live_migrate_command_with_migrate_send_data, migrate_data) class RollbackLiveMigrateDestinationTestCase(VMOpsTestBase): @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref') @mock.patch.object(volume_utils, 'forget_sr') def test_rollback_dest_calls_sr_forget(self, forget_sr, sr_ref): block_device_info = {'block_device_mapping': [{'connection_info': {'data': {'volume_id': 'fake-uuid', 'target_iqn': 'fake-iqn', 'target_portal': 'fake-portal'}}}]} self.vmops.rollback_live_migration_at_destination('instance', block_device_info) forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref') @mock.patch.object(volume_utils, 'forget_sr') @mock.patch.object(volume_utils, 'find_sr_by_uuid', side_effect=test.TestingException) def test_rollback_dest_handles_exception(self, find_sr_ref, forget_sr): block_device_info = {'block_device_mapping': [{'connection_info': {'data': {'volume_id': 'fake-uuid', 'target_iqn': 'fake-iqn', 'target_portal': 'fake-portal'}}}]} self.vmops.rollback_live_migration_at_destination('instance', block_device_info) self.assertFalse(forget_sr.called) @mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown') @mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label') @mock.patch.object(vmops.VMOps, '_update_instance_progress') @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') @mock.patch.object(vm_utils, 'resize_disk') @mock.patch.object(vm_utils, 'migrate_vhd') @mock.patch.object(vm_utils, 'destroy_vdi') class MigrateDiskResizingDownTestCase(VMOpsTestBase): def test_migrate_disk_resizing_down_works_no_ephemeral( self, mock_destroy_vdi, mock_migrate_vhd, mock_resize_disk, mock_get_vdi_for_vm_safely, mock_update_instance_progress, mock_apply_orig_vm_name_label, mock_resize_ensure_vm_is_shutdown): context = "ctx" instance = {"name": "fake", "uuid": "uuid"} dest = "dest" vm_ref = "vm_ref" sr_path = "sr_path" instance_type = dict(root_gb=1) old_vdi_ref = "old_ref" new_vdi_ref = "new_ref" new_vdi_uuid = "new_uuid" mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None) mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid) self.vmops._migrate_disk_resizing_down(context, instance, dest, instance_type, vm_ref, sr_path) mock_get_vdi_for_vm_safely.assert_called_once_with( self.vmops._session, vm_ref) mock_resize_ensure_vm_is_shutdown.assert_called_once_with( instance, vm_ref) mock_apply_orig_vm_name_label.assert_called_once_with( instance, vm_ref) mock_resize_disk.assert_called_once_with( self.vmops._session, instance, old_vdi_ref, 
instance_type) mock_migrate_vhd.assert_called_once_with( self.vmops._session, instance, new_vdi_uuid, dest, sr_path, 0) mock_destroy_vdi.assert_called_once_with( self.vmops._session, new_vdi_ref) prog_expected = [ mock.call(context, instance, 1, 5), mock.call(context, instance, 2, 5), mock.call(context, instance, 3, 5), mock.call(context, instance, 4, 5) # 5/5: step to be executed by finish migration. ] self.assertEqual(prog_expected, mock_update_instance_progress.call_args_list) class GetVdisForInstanceTestCase(VMOpsTestBase): """Tests get_vdis_for_instance utility method.""" def setUp(self): super(GetVdisForInstanceTestCase, self).setUp() self.context = context.get_admin_context() self.context.auth_token = 'auth_token' self.session = mock.Mock() self.vmops._session = self.session self.instance = fake_instance.fake_instance_obj(self.context) self.name_label = 'name' self.image = 'fake_image_id' @mock.patch.object(volumeops.VolumeOps, "connect_volume", return_value=("sr", "vdi_uuid")) def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock): # setup fake data data = {'name_label': self.name_label, 'sr_uuid': 'fake', 'auth_password': 'scrubme'} bdm = [{'mount_device': '/dev/vda', 'connection_info': {'data': data}}] bdi = {'root_device_name': 'vda', 'block_device_mapping': bdm} # Tests that the parameters to the to_xml method are sanitized for # passwords when logged. def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: self.assertNotIn('scrubme', args[0]) fake_debug.matched = True fake_debug.matched = False with mock.patch.object(vmops.LOG, 'debug', side_effect=fake_debug) as debug_mock: vdis = self.vmops._get_vdis_for_instance(self.context, self.instance, self.name_label, self.image, image_type=4, block_device_info=bdi) self.assertEqual(1, len(vdis)) get_uuid_mock.assert_called_once_with({"data": data}) # we don't care what the log message is, we just want to make sure # our stub method is called which asserts the password is scrubbed self.assertTrue(debug_mock.called) self.assertTrue(fake_debug.matched) nova-13.1.4/nova/tests/unit/virt/xenapi/client/0000775000567000056710000000000013064447471022530 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/client/__init__.py0000664000567000056710000000000013064447152024623 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/client/test_session.py0000664000567000056710000002062213064447152025622 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
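# NOTE: a minimal, hedged sketch of the mocking pattern exercised by
# SessionTestCase below, kept entirely in comments so this module's runtime
# behaviour is unchanged; every name is taken from the tests that follow.
#
#     with mock.patch.object(session.XenAPISession, '_create_session') as cs, \
#             mock.patch.object(session.XenAPISession,
#                               '_get_product_version_and_brand',
#                               return_value=('version', 'brand')), \
#             mock.patch.object(session.XenAPISession, '_verify_plugin_version'):
#         fake_sess = cs.return_value
#         session.XenAPISession('http://someserver', 'username', 'password')
#         fake_sess.login_with_password.assert_called_with(
#             'username', 'password', mock.ANY, 'OpenStack')
#
# The slave-host test relies on a login that fails with
# XenAPI.Failure(['HOST_IS_SLAVE', 'master']) being retried against the
# reported master, which is why it expects three login_with_password calls.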
import errno import socket import mock from nova import exception from nova.tests.unit.virt.xenapi import stubs from nova import version from nova.virt.xenapi.client import session class SessionTestCase(stubs.XenAPITestBaseNoDB): @mock.patch.object(session.XenAPISession, '_create_session') @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand') @mock.patch.object(session.XenAPISession, '_verify_plugin_version') def test_session_passes_version(self, mock_verify, mock_version, create_session): sess = mock.Mock() create_session.return_value = sess mock_version.return_value = ('version', 'brand') session.XenAPISession('http://someserver', 'username', 'password') expected_version = '%s %s %s' % (version.vendor_string(), version.product_string(), version.version_string_with_package()) sess.login_with_password.assert_called_with('username', 'password', expected_version, 'OpenStack') @mock.patch('eventlet.timeout.Timeout') @mock.patch.object(session.XenAPISession, '_create_session') @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand') @mock.patch.object(session.XenAPISession, '_verify_plugin_version') def test_session_login_with_timeout(self, mock_verify, mock_version, create_session, mock_timeout): self.flags(connection_concurrent=2, group='xenserver') sess = mock.Mock() create_session.return_value = sess mock_version.return_value = ('version', 'brand') session.XenAPISession('http://someserver', 'username', 'password') self.assertEqual(2, sess.login_with_password.call_count) self.assertEqual(2, mock_timeout.call_count) @mock.patch('eventlet.timeout.Timeout') @mock.patch.object(session.XenAPISession, '_create_session') @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand') @mock.patch.object(session.XenAPISession, '_verify_plugin_version') @mock.patch.object(session.XenAPISession, '_get_host_uuid') @mock.patch.object(session.XenAPISession, '_get_host_ref') def test_session_raises_exception(self, mock_ref, mock_uuid, mock_verify, mock_version, create_session, mock_timeout): import XenAPI self.flags(connection_concurrent=2, group='xenserver') sess = mock.Mock() create_session.return_value = sess # First login fails, second login in except block succeeds, # third login for the pool succeeds sess.login_with_password.side_effect = [ XenAPI.Failure(['HOST_IS_SLAVE', 'master']), None, None] mock_version.return_value = ('version', 'brand') session.XenAPISession('http://slave', 'username', 'password') self.assertEqual(3, sess.login_with_password.call_count) self.assertEqual(3, mock_timeout.call_count) class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB): def setUp(self): super(ApplySessionHelpersTestCase, self).setUp() self.session = mock.Mock() session.apply_session_helpers(self.session) def test_apply_session_helpers_add_VM(self): self.session.VM.get_X("ref") self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref") def test_apply_session_helpers_add_SR(self): self.session.SR.get_X("ref") self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref") def test_apply_session_helpers_add_VDI(self): self.session.VDI.get_X("ref") self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref") def test_apply_session_helpers_add_VBD(self): self.session.VBD.get_X("ref") self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref") def test_apply_session_helpers_add_PBD(self): self.session.PBD.get_X("ref") self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref") def 
test_apply_session_helpers_add_PIF(self): self.session.PIF.get_X("ref") self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref") def test_apply_session_helpers_add_VLAN(self): self.session.VLAN.get_X("ref") self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref") def test_apply_session_helpers_add_host(self): self.session.host.get_X("ref") self.session.call_xenapi.assert_called_once_with("host.get_X", "ref") def test_apply_session_helpers_add_network(self): self.session.network.get_X("ref") self.session.call_xenapi.assert_called_once_with("network.get_X", "ref") def test_apply_session_helpers_add_pool(self): self.session.pool.get_X("ref") self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref") class CallPluginTestCase(stubs.XenAPITestBaseNoDB): def _get_fake_xapisession(self): class FakeXapiSession(session.XenAPISession): def __init__(self, **kwargs): "Skip the superclass's dirty init" self.XenAPI = mock.MagicMock() return FakeXapiSession() def setUp(self): super(CallPluginTestCase, self).setUp() self.session = self._get_fake_xapisession() def test_serialized_with_retry_socket_error_conn_reset(self): exc = socket.error() exc.errno = errno.ECONNRESET plugin = 'glance' fn = 'download_vhd' num_retries = 1 callback = None retry_cb = mock.Mock() with mock.patch.object(self.session, 'call_plugin_serialized', spec=True) as call_plugin_serialized: call_plugin_serialized.side_effect = exc self.assertRaises(exception.PluginRetriesExceeded, self.session.call_plugin_serialized_with_retry, plugin, fn, num_retries, callback, retry_cb) call_plugin_serialized.assert_called_with(plugin, fn) self.assertEqual(2, call_plugin_serialized.call_count) self.assertEqual(2, retry_cb.call_count) def test_serialized_with_retry_socket_error_reraised(self): exc = socket.error() exc.errno = errno.ECONNREFUSED plugin = 'glance' fn = 'download_vhd' num_retries = 1 callback = None retry_cb = mock.Mock() with mock.patch.object(self.session, 'call_plugin_serialized', spec=True) as call_plugin_serialized: call_plugin_serialized.side_effect = exc self.assertRaises(socket.error, self.session.call_plugin_serialized_with_retry, plugin, fn, num_retries, callback, retry_cb) call_plugin_serialized.assert_called_once_with(plugin, fn) self.assertEqual(0, retry_cb.call_count) def test_serialized_with_retry_socket_reset_reraised(self): exc = socket.error() exc.errno = errno.ECONNRESET plugin = 'glance' fn = 'download_vhd' num_retries = 1 callback = None retry_cb = mock.Mock() with mock.patch.object(self.session, 'call_plugin_serialized', spec=True) as call_plugin_serialized: call_plugin_serialized.side_effect = exc self.assertRaises(exception.PluginRetriesExceeded, self.session.call_plugin_serialized_with_retry, plugin, fn, num_retries, callback, retry_cb) call_plugin_serialized.assert_called_with(plugin, fn) self.assertEqual(2, call_plugin_serialized.call_count) nova-13.1.4/nova/tests/unit/virt/xenapi/client/test_objects.py0000664000567000056710000000761513064447152025577 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.tests.unit.virt.xenapi import stubs from nova import utils from nova.virt.xenapi.client import objects class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB): def setUp(self): super(XenAPISessionObjectTestCase, self).setUp() self.session = mock.Mock() self.obj = objects.XenAPISessionObject(self.session, "FAKE") def test_call_method_via_attr(self): self.session.call_xenapi.return_value = "asdf" result = self.obj.get_X("ref") self.assertEqual(result, "asdf") self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref") class ObjectsTestCase(stubs.XenAPITestBaseNoDB): def setUp(self): super(ObjectsTestCase, self).setUp() self.session = mock.Mock() def test_VM(self): vm = objects.VM(self.session) vm.get_X("ref") self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref") def test_SR(self): sr = objects.SR(self.session) sr.get_X("ref") self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref") def test_VDI(self): vdi = objects.VDI(self.session) vdi.get_X("ref") self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref") def test_VBD(self): vbd = objects.VBD(self.session) vbd.get_X("ref") self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref") def test_PBD(self): pbd = objects.PBD(self.session) pbd.get_X("ref") self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref") def test_PIF(self): pif = objects.PIF(self.session) pif.get_X("ref") self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref") def test_VLAN(self): vlan = objects.VLAN(self.session) vlan.get_X("ref") self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref") def test_host(self): host = objects.Host(self.session) host.get_X("ref") self.session.call_xenapi.assert_called_once_with("host.get_X", "ref") def test_network(self): network = objects.Network(self.session) network.get_X("ref") self.session.call_xenapi.assert_called_once_with("network.get_X", "ref") def test_pool(self): pool = objects.Pool(self.session) pool.get_X("ref") self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref") class VBDTestCase(stubs.XenAPITestBaseNoDB): def setUp(self): super(VBDTestCase, self).setUp() self.session = mock.Mock() self.session.VBD = objects.VBD(self.session) def test_plug(self): self.session.VBD.plug("vbd_ref", "vm_ref") self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref") def test_unplug(self): self.session.VBD.unplug("vbd_ref", "vm_ref") self.session.call_xenapi.assert_called_once_with("VBD.unplug", "vbd_ref") @mock.patch.object(utils, 'synchronized') def test_vbd_plug_check_synchronized(self, mock_synchronized): self.session.VBD.unplug("vbd_ref", "vm_ref") mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref") nova-13.1.4/nova/tests/unit/virt/xenapi/test_xenapi.py0000664000567000056710000052765713064447152024171 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Test suite for XenAPI.""" import ast import base64 import contextlib import copy import functools import os import re import uuid import mock from mox3 import mox from oslo_concurrency import lockutils from oslo_config import fixture as config_fixture from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import importutils import six import testtools from nova.compute import api as compute_api from nova.compute import arch from nova.compute import hv_type from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states import nova.conf from nova import context from nova import crypto from nova import db from nova import exception from nova import objects from nova.objects import base from nova import test from nova.tests.unit.db import fakes as db_fakes from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance from nova.tests.unit import fake_network from nova.tests.unit import fake_processutils import nova.tests.unit.image.fake as fake_image from nova.tests.unit import matchers from nova.tests.unit.objects import test_aggregate from nova.tests.unit import utils as test_utils from nova.tests.unit.virt.xenapi import stubs from nova.virt import fake from nova.virt.xenapi import agent from nova.virt.xenapi.client import session as xenapi_session from nova.virt.xenapi import driver as xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import host from nova.virt.xenapi.image import glance from nova.virt.xenapi import pool from nova.virt.xenapi import pool_states from nova.virt.xenapi import vm_utils from nova.virt.xenapi import vmops from nova.virt.xenapi import volume_utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF CONF.import_opt('compute_manager', 'nova.service') CONF.import_opt('network_manager', 'nova.service') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session', group="xenserver") IMAGE_MACHINE = '1' IMAGE_KERNEL = '2' IMAGE_RAMDISK = '3' IMAGE_RAW = '4' IMAGE_VHD = '5' IMAGE_ISO = '6' IMAGE_IPXE_ISO = '7' IMAGE_FROM_VOLUME = '8' IMAGE_FIXTURES = { IMAGE_MACHINE: { 'image_meta': {'name': 'fakemachine', 'size': 0, 'disk_format': 'ami', 'container_format': 'ami', 'id': 'fake-image'}, }, IMAGE_KERNEL: { 'image_meta': {'name': 'fakekernel', 'size': 0, 'disk_format': 'aki', 'container_format': 'aki', 'id': 'fake-kernel'}, }, IMAGE_RAMDISK: { 'image_meta': {'name': 'fakeramdisk', 'size': 0, 'disk_format': 'ari', 'container_format': 'ari', 'id': 'fake-ramdisk'}, }, IMAGE_RAW: { 'image_meta': {'name': 'fakeraw', 'size': 0, 'disk_format': 'raw', 'container_format': 'bare', 'id': 'fake-image-raw'}, }, IMAGE_VHD: { 'image_meta': {'name': 'fakevhd', 'size': 0, 'disk_format': 'vhd', 'container_format': 'ovf', 'id': 'fake-image-vhd'}, }, IMAGE_ISO: { 'image_meta': {'name': 'fakeiso', 'size': 0, 'disk_format': 'iso', 'container_format': 'bare', 'id': 'fake-image-iso'}, }, IMAGE_IPXE_ISO: { 'image_meta': {'name': 'fake_ipxe_iso', 'size': 0, 'disk_format': 'iso', 'container_format': 'bare', 'id': 'fake-image-pxe', 'properties': {'ipxe_boot': 'true'}}, }, IMAGE_FROM_VOLUME: { 'image_meta': {'name': 'fake_ipxe_iso', 'id': 'fake-image-volume', 'properties': {'foo': 'bar'}}, }, } def get_session(): return xenapi_session.XenAPISession('test_url', 'root', 
'test_pass') def set_image_fixtures(): image_service = fake_image.FakeImageService() image_service.images.clear() for image_id, image_meta in IMAGE_FIXTURES.items(): image_meta = image_meta['image_meta'] image_meta['id'] = image_id image_service.create(None, image_meta) def get_fake_device_info(): # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid # can be removed from the dict when LP bug #1087308 is fixed fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None) fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid'] fake = {'block_device_mapping': [{'connection_info': {'driver_volume_type': 'iscsi', 'data': {'sr_uuid': 'falseSR', 'introduce_sr_keys': ['sr_type'], 'sr_type': 'iscsi', 'vdi_uuid': fake_vdi_uuid, 'target_discovered': False, 'target_iqn': 'foo_iqn:foo_volid', 'target_portal': 'localhost:3260', 'volume_id': 'foo_volid', 'target_lun': 1, 'auth_password': 'my-p@55w0rd', 'auth_username': 'johndoe', 'auth_method': u'CHAP'}, }, 'mount_device': 'vda', 'delete_on_termination': False}, ], 'root_device_name': '/dev/sda', 'ephemerals': [], 'swap': None, } return fake def stub_vm_utils_with_vdi_attached_here(function): """vm_utils.with_vdi_attached_here needs to be stubbed out because it calls down to the filesystem to attach a vdi. This provides a decorator to handle that. """ @functools.wraps(function) def decorated_function(self, *args, **kwargs): @contextlib.contextmanager def fake_vdi_attached_here(*args, **kwargs): fake_dev = 'fakedev' yield fake_dev def fake_image_download(*args, **kwargs): pass orig_vdi_attached_here = vm_utils.vdi_attached_here orig_image_download = fake_image._FakeImageService.download try: vm_utils.vdi_attached_here = fake_vdi_attached_here fake_image._FakeImageService.download = fake_image_download return function(self, *args, **kwargs) finally: fake_image._FakeImageService.download = orig_image_download vm_utils.vdi_attached_here = orig_vdi_attached_here return decorated_function def create_instance_with_system_metadata(context, instance_values): inst = objects.Instance(context=context, system_metadata={}) for k, v in instance_values.items(): setattr(inst, k, v) inst.flavor = objects.Flavor.get_by_id(context, instance_values['instance_type_id']) inst.old_flavor = None inst.new_flavor = None inst.create() inst.pci_devices = objects.PciDeviceList(objects=[]) return inst class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB): """Unit tests for Volume operations.""" def setUp(self): super(XenAPIVolumeTestCase, self).setUp() self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(firewall_driver='nova.virt.xenapi.firewall.' 'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') self.instance = fake_instance.fake_db_instance(name='foo') @classmethod def _make_connection_info(cls): target_iqn = 'iqn.2010-10.org.openstack:volume-00000001' return {'driver_volume_type': 'iscsi', 'data': {'volume_id': 1, 'target_iqn': target_iqn, 'target_portal': '127.0.0.1:3260,fake', 'target_lun': None, 'auth_method': 'CHAP', 'auth_username': 'username', 'auth_password': 'password'}} def test_attach_volume(self): # This shows how to test Ops classes' methods. 
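        # A short sketch of that pattern, in comments only and using the same
        # fakes as the test body below: stub the session, build the driver on
        # FakeVirtAPI, call the Ops method, then assert against the records
        # kept by xenapi_fake.
        #
        #     stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        #     conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        #     conn.attach_volume(None, self._make_connection_info(),
        #                        self.instance, '/dev/sdc')
        #     vbd_refs = xenapi_fake.get_all('VBD')  # fake records to check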
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vm = xenapi_fake.create_vm(self.instance['name'], 'Running') conn_info = self._make_connection_info() self.assertIsNone( conn.attach_volume(None, conn_info, self.instance, '/dev/sdc')) # check that the VM has a VBD attached to it # Get XenAPI record for VBD vbds = xenapi_fake.get_all('VBD') vbd = xenapi_fake.get_record('VBD', vbds[0]) vm_ref = vbd['VM'] self.assertEqual(vm_ref, vm) def test_attach_volume_raise_exception(self): # This shows how to test when exceptions are raised. stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeFailedTests) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) xenapi_fake.create_vm(self.instance['name'], 'Running') self.assertRaises(exception.VolumeDriverNotFound, conn.attach_volume, None, {'driver_volume_type': 'nonexist'}, self.instance, '/dev/sdc') # FIXME(sirp): convert this to use XenAPITestBaseNoDB class XenAPIVMTestCase(stubs.XenAPITestBase): """Unit tests for VM operations.""" def setUp(self): super(XenAPIVMTestCase, self).setUp() self.useFixture(test.SampleNetworks()) self.network = importutils.import_object(CONF.network_manager) self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') db_fakes.stub_out_db_instance_api(self) xenapi_fake.create_network('fake', 'fake_br1') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stub_out_vm_methods(self.stubs) fake_processutils.stub_out_processutils_execute(self.stubs) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.conn._session.is_local_connection = False fake_image.stub_out_image_service(self) set_image_fixtures() stubs.stubout_image_service_download(self.stubs) stubs.stubout_stream_disk(self.stubs) def fake_inject_instance_metadata(self, instance, vm): pass self.stubs.Set(vmops.VMOps, '_inject_instance_metadata', fake_inject_instance_metadata) def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref): name_label = "fakenamelabel" disk_type = "fakedisktype" virtual_size = 777 return vm_utils.create_vdi( session, sr_ref, instance, name_label, disk_type, virtual_size) self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi) def fake_unpause_and_wait(self, vm_ref, instance, power_on): self._update_last_dom_id(vm_ref) self.stubs.Set(vmops.VMOps, '_unpause_and_wait', fake_unpause_and_wait) def tearDown(self): fake_image.FakeImageService_reset() super(XenAPIVMTestCase, self).tearDown() def test_init_host(self): session = get_session() vm = vm_utils._get_this_vm_ref(session) # Local root disk vdi0 = xenapi_fake.create_vdi('compute', None) vbd0 = xenapi_fake.create_vbd(vm, vdi0) # Instance VDI vdi1 = xenapi_fake.create_vdi('instance-aaaa', None, other_config={'nova_instance_uuid': 'aaaa'}) xenapi_fake.create_vbd(vm, vdi1) # Only looks like instance VDI vdi2 = xenapi_fake.create_vdi('instance-bbbb', None) vbd2 = xenapi_fake.create_vbd(vm, vdi2) self.conn.init_host(None) self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2])) def test_instance_exists(self): 
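        # This test (and several below) uses the legacy mox API. A hedged
        # sketch of the equivalent mock-based form, kept in comments only
        # (mox's StubOutWithMock + AndReturn + ReplayAll maps onto
        # mock.patch.object with a return_value):
        #
        #     with mock.patch.object(vm_utils, 'lookup', return_value=True):
        #         self.stubs.Set(objects.Instance, 'name', 'foo')
        #         instance = objects.Instance(uuid='fake-uuid')
        #         self.assertTrue(self.conn.instance_exists(instance))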
self.mox.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True) self.mox.ReplayAll() self.stubs.Set(objects.Instance, 'name', 'foo') instance = objects.Instance(uuid='fake-uuid') self.assertTrue(self.conn.instance_exists(instance)) def test_instance_not_exists(self): self.mox.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None) self.mox.ReplayAll() self.stubs.Set(objects.Instance, 'name', 'bar') instance = objects.Instance(uuid='fake-uuid') self.assertFalse(self.conn.instance_exists(instance)) def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEqual(instances, []) def test_list_instance_uuids_0(self): instance_uuids = self.conn.list_instance_uuids() self.assertEqual(instance_uuids, []) def test_list_instance_uuids(self): uuids = [] for x in range(1, 4): instance = self._create_instance() uuids.append(instance['uuid']) instance_uuids = self.conn.list_instance_uuids() self.assertEqual(len(uuids), len(instance_uuids)) self.assertEqual(set(uuids), set(instance_uuids)) def test_get_rrd_server(self): self.flags(connection_url='myscheme://myaddress/', group='xenserver') server_info = vm_utils._get_rrd_server() self.assertEqual(server_info[0], 'myscheme') self.assertEqual(server_info[1], 'myaddress') expected_raw_diagnostics = { 'vbd_xvdb_write': '0.0', 'memory_target': '4294967296.0000', 'memory_internal_free': '1415564.0000', 'memory': '4294967296.0000', 'vbd_xvda_write': '0.0', 'cpu0': '0.0042', 'vif_0_tx': '287.4134', 'vbd_xvda_read': '0.0', 'vif_0_rx': '1816.0144', 'vif_2_rx': '0.0', 'vif_2_tx': '0.0', 'vbd_xvdb_read': '0.0', 'last_update': '1328795567', } def test_get_diagnostics(self): def fake_get_rrd(host, vm_uuid): path = os.path.dirname(os.path.realpath(__file__)) with open(os.path.join(path, 'vm_rrd.xml')) as f: return re.sub(r'\s', '', f.read()) self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd) expected = self.expected_raw_diagnostics instance = self._create_instance() actual = self.conn.get_diagnostics(instance) self.assertThat(actual, matchers.DictMatches(expected)) def test_get_instance_diagnostics(self): def fake_get_rrd(host, vm_uuid): path = os.path.dirname(os.path.realpath(__file__)) with open(os.path.join(path, 'vm_rrd.xml')) as f: return re.sub(r'\s', '', f.read()) self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd) expected = { 'config_drive': False, 'state': 'running', 'driver': 'xenapi', 'version': '1.0', 'uptime': 0, 'hypervisor_os': None, 'cpu_details': [{'time': 0}, {'time': 0}, {'time': 0}, {'time': 0}], 'nic_details': [{'mac_address': '00:00:00:00:00:00', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 0, 'rx_packets': 0, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 0, 'read_requests': 0, 'write_bytes': 0, 'write_requests': 0}], 'memory_details': {'maximum': 8192, 'used': 0}} instance = self._create_instance(obj=True) actual = self.conn.get_instance_diagnostics(instance) self.assertEqual(expected, actual.serialize()) def test_get_vnc_console(self): instance = self._create_instance(obj=True) session = get_session() conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vm_ref = vm_utils.lookup(session, instance['name']) console = conn.get_vnc_console(self.context, instance) # Note(sulo): We don't care about session id in test # they will always differ so strip that out actual_path = console.internal_access_path.split('&')[0] expected_path = "/console?ref=%s" % str(vm_ref) 
self.assertEqual(expected_path, actual_path) def test_get_vnc_console_for_rescue(self): instance = self._create_instance(obj=True) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running') # Set instance state to rescued instance['vm_state'] = 'rescued' console = conn.get_vnc_console(self.context, instance) # Note(sulo): We don't care about session id in test # they will always differ so strip that out actual_path = console.internal_access_path.split('&')[0] expected_path = "/console?ref=%s" % str(rescue_vm) self.assertEqual(expected_path, actual_path) def test_get_vnc_console_instance_not_ready(self): instance = self._create_instance(obj=True, spawn=False) instance.vm_state = 'building' conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.InstanceNotFound, conn.get_vnc_console, self.context, instance) def test_get_vnc_console_rescue_not_ready(self): instance = self._create_instance(obj=True, spawn=False) instance.vm_state = 'rescued' conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.InstanceNotReady, conn.get_vnc_console, self.context, instance) def test_instance_snapshot_fails_with_no_primary_vdi(self): def create_bad_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk', read_only=False, bootable=False, osvol=False): vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref, 'userdevice': 'fake', 'currently_attached': False} vbd_ref = xenapi_fake._create_object('VBD', vbd_rec) xenapi_fake.after_VBD_create(vbd_ref, vbd_rec) return vbd_ref self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd) stubs.stubout_instance_snapshot(self.stubs) # Stubbing out firewall driver as previous stub sets alters # xml rpc result parsing stubs.stubout_firewall_driver(self.stubs, self.conn) instance = self._create_instance() image_id = "my_snapshot_id" self.assertRaises(exception.NovaException, self.conn.snapshot, self.context, instance, image_id, lambda *args, **kwargs: None) def test_instance_snapshot(self): expected_calls = [ {'args': (), 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}}, {'args': (), 'kwargs': {'task_state': task_states.IMAGE_UPLOADING, 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}] func_call_matcher = matchers.FunctionCallMatcher(expected_calls) image_id = "my_snapshot_id" stubs.stubout_instance_snapshot(self.stubs) stubs.stubout_is_snapshot(self.stubs) # Stubbing out firewall driver as previous stub sets alters # xml rpc result parsing stubs.stubout_firewall_driver(self.stubs, self.conn) instance = self._create_instance() self.fake_upload_called = False def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids): self.fake_upload_called = True self.assertEqual(ctx, self.context) self.assertEqual(inst, instance) self.assertIsInstance(vdi_uuids, list) self.assertEqual(img_id, image_id) self.stubs.Set(glance.GlanceStore, 'upload_image', fake_image_upload) self.conn.snapshot(self.context, instance, image_id, func_call_matcher.call) # Ensure VM was torn down vm_labels = [] for vm_ref in xenapi_fake.get_all('VM'): vm_rec = xenapi_fake.get_record('VM', vm_ref) if not vm_rec["is_control_domain"]: vm_labels.append(vm_rec["name_label"]) self.assertEqual(vm_labels, [instance['name']]) # Ensure VBDs were torn down vbd_labels = [] for vbd_ref in xenapi_fake.get_all('VBD'): vbd_rec = xenapi_fake.get_record('VBD', vbd_ref) vbd_labels.append(vbd_rec["vm_name_label"]) self.assertEqual(vbd_labels, [instance['name']]) # Ensure task states changed 
in correct order self.assertIsNone(func_call_matcher.match()) # Ensure VDIs were torn down for vdi_ref in xenapi_fake.get_all('VDI'): vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) name_label = vdi_rec["name_label"] self.assertFalse(name_label.endswith('snapshot')) self.assertTrue(self.fake_upload_called) def create_vm_record(self, conn, os_type, name): instances = conn.list_instances() self.assertEqual(instances, [name]) # Get Nova record for VM vm_info = conn.get_info({'name': name}) # Get XenAPI record for VM vms = [rec for ref, rec in six.iteritems(xenapi_fake.get_all_records('VM')) if not rec['is_control_domain']] vm = vms[0] self.vm_info = vm_info self.vm = vm def check_vm_record(self, conn, instance_type_id, check_injection): flavor = db.flavor_get(conn, instance_type_id) mem_kib = int(flavor['memory_mb']) << 10 mem_bytes = str(mem_kib << 10) vcpus = flavor['vcpus'] vcpu_weight = flavor['vcpu_weight'] self.assertEqual(self.vm_info.max_mem_kb, mem_kib) self.assertEqual(self.vm_info.mem_kb, mem_kib) self.assertEqual(self.vm['memory_static_max'], mem_bytes) self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes) self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes) self.assertEqual(self.vm['VCPUs_max'], str(vcpus)) self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus)) if vcpu_weight is None: self.assertEqual(self.vm['VCPUs_params'], {}) else: self.assertEqual(self.vm['VCPUs_params'], {'weight': str(vcpu_weight), 'cap': '0'}) # Check that the VM is running according to Nova self.assertEqual(self.vm_info.state, power_state.RUNNING) # Check that the VM is running according to XenAPI. self.assertEqual(self.vm['power_state'], 'Running') if check_injection: xenstore_data = self.vm['xenstore_data'] self.assertNotIn('vm-data/hostname', xenstore_data) key = 'vm-data/networking/DEADBEEF0001' xenstore_value = xenstore_data[key] tcpip_data = ast.literal_eval(xenstore_value) self.assertJsonEqual({'broadcast': '192.168.1.255', 'dns': ['192.168.1.4', '192.168.1.3'], 'gateway': '192.168.1.1', 'gateway_v6': '2001:db8:0:1::1', 'ip6s': [{'enabled': '1', 'ip': '2001:db8:0:1:dcad:beff:feef:1', 'netmask': 64, 'gateway': '2001:db8:0:1::1'}], 'ips': [{'enabled': '1', 'ip': '192.168.1.100', 'netmask': '255.255.255.0', 'gateway': '192.168.1.1'}, {'enabled': '1', 'ip': '192.168.1.101', 'netmask': '255.255.255.0', 'gateway': '192.168.1.1'}], 'label': 'test1', 'mac': 'DE:AD:BE:EF:00:01'}, tcpip_data) def check_vm_params_for_windows(self): self.assertEqual(self.vm['platform']['nx'], 'true') self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'}) self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order') # check that these are not set self.assertEqual(self.vm['PV_args'], '') self.assertEqual(self.vm['PV_bootloader'], '') self.assertEqual(self.vm['PV_kernel'], '') self.assertEqual(self.vm['PV_ramdisk'], '') def check_vm_params_for_linux(self): self.assertEqual(self.vm['platform']['nx'], 'false') self.assertEqual(self.vm['PV_args'], '') self.assertEqual(self.vm['PV_bootloader'], 'pygrub') # check that these are not set self.assertEqual(self.vm['PV_kernel'], '') self.assertEqual(self.vm['PV_ramdisk'], '') self.assertEqual(self.vm['HVM_boot_params'], {}) self.assertEqual(self.vm['HVM_boot_policy'], '') def check_vm_params_for_linux_with_external_kernel(self): self.assertEqual(self.vm['platform']['nx'], 'false') self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1') self.assertNotEqual(self.vm['PV_kernel'], '') self.assertNotEqual(self.vm['PV_ramdisk'], '') # check that these are not set 
self.assertEqual(self.vm['HVM_boot_params'], {}) self.assertEqual(self.vm['HVM_boot_policy'], '') def _list_vdis(self): session = get_session() return session.call_xenapi('VDI.get_all') def _list_vms(self): session = get_session() return session.call_xenapi('VM.get_all') def _check_vdis(self, start_list, end_list): for vdi_ref in end_list: if vdi_ref not in start_list: vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) # If the cache is turned on then the base disk will be # there even after the cleanup if 'other_config' in vdi_rec: if 'image-id' not in vdi_rec['other_config']: self.fail('Found unexpected VDI:%s' % vdi_ref) else: self.fail('Found unexpected VDI:%s' % vdi_ref) def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", hostname="test", architecture="x86-64", instance_id=1, injected_files=None, check_injection=False, create_record=True, empty_dns=False, block_device_info=None, key_data=None): if injected_files is None: injected_files = [] # Fake out inject_instance_metadata def fake_inject_instance_metadata(self, instance, vm): pass self.stubs.Set(vmops.VMOps, '_inject_instance_metadata', fake_inject_instance_metadata) if create_record: instance = objects.Instance(context=self.context) instance.project_id = self.project_id instance.user_id = self.user_id instance.image_ref = image_ref instance.kernel_id = kernel_id instance.ramdisk_id = ramdisk_id instance.root_gb = 20 instance.ephemeral_gb = 0 instance.instance_type_id = instance_type_id instance.os_type = os_type instance.hostname = hostname instance.key_data = key_data instance.architecture = architecture instance.system_metadata = {} flavor = objects.Flavor.get_by_id(self.context, instance_type_id) if instance_type_id == 5: # NOTE(danms): xenapi test stubs have flavor 5 with no # vcpu_weight flavor.vcpu_weight = None instance.flavor = flavor instance.create() else: instance = objects.Instance.get_by_id(self.context, instance_id, expected_attrs=['flavor']) network_info = fake_network.fake_get_instance_nw_info(self) if empty_dns: # NOTE(tr3buchet): this is a terrible way to do this... 
network_info[0]['network']['subnets'][0]['dns'] = [] image_meta = objects.ImageMeta.from_dict( IMAGE_FIXTURES[image_ref]["image_meta"]) self.conn.spawn(self.context, instance, image_meta, injected_files, 'herp', network_info, block_device_info) self.create_vm_record(self.conn, os_type, instance['name']) self.check_vm_record(self.conn, instance_type_id, check_injection) self.assertEqual(instance['os_type'], os_type) self.assertEqual(instance['architecture'], architecture) def test_spawn_ipxe_iso_success(self): self.mox.StubOutWithMock(vm_utils, 'get_sr_path') vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path') self.flags(ipxe_network_name='test1', ipxe_boot_menu_url='http://boot.example.com', ipxe_mkisofs_cmd='/root/mkisofs', group='xenserver') self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized') self.conn._session.call_plugin_serialized( 'ipxe', 'inject', '/sr/path', mox.IgnoreArg(), 'http://boot.example.com', '192.168.1.100', '255.255.255.0', '192.168.1.1', '192.168.1.3', '/root/mkisofs') self.mox.ReplayAll() self._test_spawn(IMAGE_IPXE_ISO, None, None) def test_spawn_ipxe_iso_no_network_name(self): self.flags(ipxe_network_name=None, ipxe_boot_menu_url='http://boot.example.com', group='xenserver') # call_plugin_serialized shouldn't be called self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized') self.mox.ReplayAll() self._test_spawn(IMAGE_IPXE_ISO, None, None) def test_spawn_ipxe_iso_no_boot_menu_url(self): self.flags(ipxe_network_name='test1', ipxe_boot_menu_url=None, group='xenserver') # call_plugin_serialized shouldn't be called self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized') self.mox.ReplayAll() self._test_spawn(IMAGE_IPXE_ISO, None, None) def test_spawn_ipxe_iso_unknown_network_name(self): self.flags(ipxe_network_name='test2', ipxe_boot_menu_url='http://boot.example.com', group='xenserver') # call_plugin_serialized shouldn't be called self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized') self.mox.ReplayAll() self._test_spawn(IMAGE_IPXE_ISO, None, None) def test_spawn_empty_dns(self): # Test spawning with an empty dns list. self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64", empty_dns=True) self.check_vm_params_for_linux() def test_spawn_not_enough_memory(self): self.assertRaises(exception.InsufficientFreeMemory, self._test_spawn, '1', 2, 3, "4") # m1.xlarge def test_spawn_fail_cleanup_1(self): """Simulates an error while downloading an image. Verifies that the VM and VDIs created are properly cleaned up. """ vdi_recs_start = self._list_vdis() start_vms = self._list_vms() stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True) self.assertRaises(xenapi_fake.Failure, self._test_spawn, '1', 2, 3) # No additional VDI should be found. vdi_recs_end = self._list_vdis() end_vms = self._list_vms() self._check_vdis(vdi_recs_start, vdi_recs_end) # No additional VMs should be found. self.assertEqual(start_vms, end_vms) def test_spawn_fail_cleanup_2(self): """Simulates an error while creating VM record. Verifies that the VM and VDIs created are properly cleaned up. """ vdi_recs_start = self._list_vdis() start_vms = self._list_vms() stubs.stubout_create_vm(self.stubs) self.assertRaises(xenapi_fake.Failure, self._test_spawn, '1', 2, 3) # No additional VDI should be found. vdi_recs_end = self._list_vdis() end_vms = self._list_vms() self._check_vdis(vdi_recs_start, vdi_recs_end) # No additional VMs should be found. 
self.assertEqual(start_vms, end_vms) def test_spawn_fail_cleanup_3(self): """Simulates an error while attaching disks. Verifies that the VM and VDIs created are properly cleaned up. """ stubs.stubout_attach_disks(self.stubs) vdi_recs_start = self._list_vdis() start_vms = self._list_vms() self.assertRaises(xenapi_fake.Failure, self._test_spawn, '1', 2, 3) # No additional VDI should be found. vdi_recs_end = self._list_vdis() end_vms = self._list_vms() self._check_vdis(vdi_recs_start, vdi_recs_end) # No additional VMs should be found. self.assertEqual(start_vms, end_vms) def test_spawn_raw_glance(self): self._test_spawn(IMAGE_RAW, None, None, os_type=None) self.check_vm_params_for_windows() def test_spawn_vhd_glance_linux(self): self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64") self.check_vm_params_for_linux() def test_spawn_vhd_glance_windows(self): self._test_spawn(IMAGE_VHD, None, None, os_type="windows", architecture="i386", instance_type_id=5) self.check_vm_params_for_windows() def test_spawn_iso_glance(self): self._test_spawn(IMAGE_ISO, None, None, os_type="windows", architecture="i386") self.check_vm_params_for_windows() def test_spawn_glance(self): def fake_fetch_disk_image(context, session, instance, name_label, image_id, image_type): sr_ref = vm_utils.safe_find_sr(session) image_type_str = vm_utils.ImageType.to_string(image_type) vdi_ref = vm_utils.create_vdi(session, sr_ref, instance, name_label, image_type_str, "20") vdi_role = vm_utils.ImageType.get_role(image_type) vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) return {vdi_role: dict(uuid=vdi_uuid, file=None)} self.stubs.Set(vm_utils, '_fetch_disk_image', fake_fetch_disk_image) self._test_spawn(IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK) self.check_vm_params_for_linux_with_external_kernel() def test_spawn_boot_from_volume_no_glance_image_meta(self): dev_info = get_fake_device_info() self._test_spawn(IMAGE_FROM_VOLUME, None, None, block_device_info=dev_info) def test_spawn_boot_from_volume_with_image_meta(self): dev_info = get_fake_device_info() self._test_spawn(IMAGE_VHD, None, None, block_device_info=dev_info) @testtools.skipIf(test_utils.is_osx(), 'IPv6 pretty-printing broken on OSX, see bug 1409135') def test_spawn_netinject_file(self): self.flags(flat_injected=True) db_fakes.stub_out_db_instance_api(self, injected=True) self._tee_executed = False def _tee_handler(cmd, **kwargs): actual = kwargs.get('process_input', None) expected = """\ # Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
# The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet static hwaddress ether DE:AD:BE:EF:00:01 address 192.168.1.100 netmask 255.255.255.0 broadcast 192.168.1.255 gateway 192.168.1.1 dns-nameservers 192.168.1.3 192.168.1.4 iface eth0 inet6 static hwaddress ether DE:AD:BE:EF:00:01 address 2001:db8:0:1:dcad:beff:feef:1 netmask 64 gateway 2001:db8:0:1::1 """ self.assertEqual(expected, actual) self._tee_executed = True return '', '' def _readlink_handler(cmd_parts, **kwargs): return os.path.realpath(cmd_parts[2]), '' fake_processutils.fake_execute_set_repliers([ # Capture the tee .../etc/network/interfaces command (r'tee.*interfaces', _tee_handler), (r'readlink -nm.*', _readlink_handler), ]) self._test_spawn(IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK, check_injection=True) self.assertTrue(self._tee_executed) @testtools.skipIf(test_utils.is_osx(), 'IPv6 pretty-printing broken on OSX, see bug 1409135') def test_spawn_netinject_xenstore(self): db_fakes.stub_out_db_instance_api(self, injected=True) self._tee_executed = False def _mount_handler(cmd, *ignore_args, **ignore_kwargs): # When mounting, create real files under the mountpoint to simulate # files in the mounted filesystem # mount point will be the last item of the command list self._tmpdir = cmd[len(cmd) - 1] LOG.debug('Creating files in %s to simulate guest agent', self._tmpdir) os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin')) # Touch the file using open open(os.path.join(self._tmpdir, 'usr', 'sbin', 'xe-update-networking'), 'w').close() return '', '' def _umount_handler(cmd, *ignore_args, **ignore_kwargs): # Umount would normally make files in the mounted filesystem # disappear, so do that here LOG.debug('Removing simulated guest agent files in %s', self._tmpdir) os.remove(os.path.join(self._tmpdir, 'usr', 'sbin', 'xe-update-networking')) os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin')) os.rmdir(os.path.join(self._tmpdir, 'usr')) return '', '' def _tee_handler(cmd, *ignore_args, **ignore_kwargs): self._tee_executed = True return '', '' fake_processutils.fake_execute_set_repliers([ (r'mount', _mount_handler), (r'umount', _umount_handler), (r'tee.*interfaces', _tee_handler)]) self._test_spawn('1', 2, 3, check_injection=True) # tee must not run in this case, where an injection-capable # guest agent is detected self.assertFalse(self._tee_executed) def test_spawn_injects_auto_disk_config_to_xenstore(self): instance = self._create_instance(spawn=False, obj=True) self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config') self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg()) self.mox.ReplayAll() image_meta = objects.ImageMeta.from_dict( IMAGE_FIXTURES['1']["image_meta"]) self.conn.spawn(self.context, instance, image_meta, [], 'herp', '') def test_spawn_vlanmanager(self): self.flags(network_manager='nova.network.manager.VlanManager', vlan_interface='fake0') def dummy(*args, **kwargs): pass self.stubs.Set(vmops.VMOps, '_create_vifs', dummy) # Reset network table xenapi_fake.reset_table('network') # Instance 2 will use vlan network (see db/fakes.py) ctxt = self.context.elevated() inst2 = self._create_instance(False, obj=True) networks = self.network.db.network_get_all(ctxt) with mock.patch('nova.objects.network.Network._from_db_object'): for network in networks: self.network.set_network_host(ctxt, network) self.network.allocate_for_instance(ctxt, instance_id=inst2.id, instance_uuid=inst2.uuid, host=CONF.host, vpn=None, rxtx_factor=3, project_id=self.project_id, macs=None) 
self._test_spawn(IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK, instance_id=inst2.id, create_record=False) # TODO(salvatore-orlando): a complete test here would require # a check for making sure the bridge for the VM's VIF is # consistent with bridge specified in nova db def test_spawn_with_network_qos(self): self._create_instance() for vif_ref in xenapi_fake.get_all('VIF'): vif_rec = xenapi_fake.get_record('VIF', vif_ref) self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit') self.assertEqual(vif_rec['qos_algorithm_params']['kbps'], str(3 * 10 * 1024)) def test_spawn_ssh_key_injection(self): # Test spawning with key_data on an instance. Should use # agent file injection. self.flags(use_agent_default=True, group='xenserver') actual_injected_files = [] def fake_inject_file(self, method, args): path = base64.b64decode(args['b64_path']) contents = base64.b64decode(args['b64_contents']) actual_injected_files.append((path, contents)) return jsonutils.dumps({'returncode': '0', 'message': 'success'}) self.stubs.Set(stubs.FakeSessionForVMTests, '_plugin_agent_inject_file', fake_inject_file) def fake_encrypt_text(sshkey, new_pass): self.assertEqual("ssh-rsa fake_keydata", sshkey) return "fake" self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text) expected_data = ('\n# The following ssh key was injected by ' 'Nova\nssh-rsa fake_keydata\n') injected_files = [('/root/.ssh/authorized_keys', expected_data)] self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64", key_data='ssh-rsa fake_keydata') self.assertEqual(actual_injected_files, injected_files) def test_spawn_ssh_key_injection_non_rsa(self): # Test spawning with key_data on an instance. Should use # agent file injection. self.flags(use_agent_default=True, group='xenserver') actual_injected_files = [] def fake_inject_file(self, method, args): path = base64.b64decode(args['b64_path']) contents = base64.b64decode(args['b64_contents']) actual_injected_files.append((path, contents)) return jsonutils.dumps({'returncode': '0', 'message': 'success'}) self.stubs.Set(stubs.FakeSessionForVMTests, '_plugin_agent_inject_file', fake_inject_file) def fake_encrypt_text(sshkey, new_pass): raise NotImplementedError("Should not be called") self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text) expected_data = ('\n# The following ssh key was injected by ' 'Nova\nssh-dsa fake_keydata\n') injected_files = [('/root/.ssh/authorized_keys', expected_data)] self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64", key_data='ssh-dsa fake_keydata') self.assertEqual(actual_injected_files, injected_files) def test_spawn_injected_files(self): # Test spawning with injected_files. 
self.flags(use_agent_default=True, group='xenserver') actual_injected_files = [] def fake_inject_file(self, method, args): path = base64.b64decode(args['b64_path']) contents = base64.b64decode(args['b64_contents']) actual_injected_files.append((path, contents)) return jsonutils.dumps({'returncode': '0', 'message': 'success'}) self.stubs.Set(stubs.FakeSessionForVMTests, '_plugin_agent_inject_file', fake_inject_file) injected_files = [('/tmp/foo', 'foobar')] self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64", injected_files=injected_files) self.check_vm_params_for_linux() self.assertEqual(actual_injected_files, injected_files) @mock.patch('nova.db.agent_build_get_by_triple') def test_spawn_agent_upgrade(self, mock_get): self.flags(use_agent_default=True, group='xenserver') mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64", "hypervisor": "xen", "os": "windows", "url": "url", "md5hash": "asdf", 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 1} self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64") @mock.patch('nova.db.agent_build_get_by_triple') def test_spawn_agent_upgrade_fails_silently(self, mock_get): mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64", "hypervisor": "xen", "os": "windows", "url": "url", "md5hash": "asdf", 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 1} self._test_spawn_fails_silently_with(exception.AgentError, method="_plugin_agent_agentupdate", failure="fake_error") def test_spawn_with_resetnetwork_alternative_returncode(self): self.flags(use_agent_default=True, group='xenserver') def fake_resetnetwork(self, method, args): fake_resetnetwork.called = True # NOTE(johngarbutt): as returned by FreeBSD and Gentoo return jsonutils.dumps({'returncode': '500', 'message': 'success'}) self.stubs.Set(stubs.FakeSessionForVMTests, '_plugin_agent_resetnetwork', fake_resetnetwork) fake_resetnetwork.called = False self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64") self.assertTrue(fake_resetnetwork.called) def _test_spawn_fails_silently_with(self, expected_exception_cls, method="_plugin_agent_version", failure=None, value=None): self.flags(use_agent_default=True, agent_version_timeout=0, group='xenserver') def fake_agent_call(self, method, args): if failure: raise xenapi_fake.Failure([failure]) else: return value self.stubs.Set(stubs.FakeSessionForVMTests, method, fake_agent_call) called = {} def fake_add_instance_fault(*args, **kwargs): called["fake_add_instance_fault"] = args[2] self.stubs.Set(compute_utils, 'add_instance_fault_from_exc', fake_add_instance_fault) self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64") actual_exception = called["fake_add_instance_fault"] self.assertIsInstance(actual_exception, expected_exception_cls) def test_spawn_fails_silently_with_agent_timeout(self): self._test_spawn_fails_silently_with(exception.AgentTimeout, failure="TIMEOUT:fake") def test_spawn_fails_silently_with_agent_not_implemented(self): self._test_spawn_fails_silently_with(exception.AgentNotImplemented, failure="NOT IMPLEMENTED:fake") def test_spawn_fails_silently_with_agent_error(self): self._test_spawn_fails_silently_with(exception.AgentError, failure="fake_error") def test_spawn_fails_silently_with_agent_bad_return(self): error = jsonutils.dumps({'returncode': -1, 'message': 'fake'}) self._test_spawn_fails_silently_with(exception.AgentError, value=error) def 
test_spawn_sets_last_dom_id(self): self._test_spawn(IMAGE_VHD, None, None, os_type="linux", architecture="x86-64") self.assertEqual(self.vm['domid'], self.vm['other_config']['last_dom_id']) def test_rescue(self): instance = self._create_instance(spawn=False, obj=True) xenapi_fake.create_vm(instance['name'], 'Running') session = get_session() vm_ref = vm_utils.lookup(session, instance['name']) swap_vdi_ref = xenapi_fake.create_vdi('swap', None) root_vdi_ref = xenapi_fake.create_vdi('root', None) eph1_vdi_ref = xenapi_fake.create_vdi('eph', None) eph2_vdi_ref = xenapi_fake.create_vdi('eph', None) vol_vdi_ref = xenapi_fake.create_vdi('volume', None) xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2) xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0) xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4) xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5) xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6, other_config={'osvol': True}) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) image_meta = objects.ImageMeta.from_dict( {'id': IMAGE_VHD, 'disk_format': 'vhd', 'properties': {'vm_mode': 'xen'}}) conn.rescue(self.context, instance, [], image_meta, '') vm = xenapi_fake.get_record('VM', vm_ref) rescue_name = "%s-rescue" % vm["name_label"] rescue_ref = vm_utils.lookup(session, rescue_name) rescue_vm = xenapi_fake.get_record('VM', rescue_ref) vdi_refs = {} for vbd_ref in rescue_vm['VBDs']: vbd = xenapi_fake.get_record('VBD', vbd_ref) vdi_refs[vbd['VDI']] = vbd['userdevice'] self.assertEqual('1', vdi_refs[root_vdi_ref]) self.assertEqual('2', vdi_refs[swap_vdi_ref]) self.assertEqual('4', vdi_refs[eph1_vdi_ref]) self.assertEqual('5', vdi_refs[eph2_vdi_ref]) self.assertNotIn(vol_vdi_ref, vdi_refs) def test_rescue_preserve_disk_on_failure(self): # test that the original disk is preserved if rescue setup fails # bug #1227898 instance = self._create_instance(obj=True) session = get_session() image_meta = objects.ImageMeta.from_dict( {'id': IMAGE_VHD, 'disk_format': 'vhd', 'properties': {'vm_mode': 'xen'}}) vm_ref = vm_utils.lookup(session, instance['name']) vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref) # raise an error in the spawn setup process and trigger the # undo manager logic: def fake_start(*args, **kwargs): raise test.TestingException('Start Error') self.stubs.Set(self.conn._vmops, '_start', fake_start) self.assertRaises(test.TestingException, self.conn.rescue, self.context, instance, [], image_meta, '') # confirm original disk still exists: vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref) self.assertEqual(vdi_ref, vdi_ref2) self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid']) def test_unrescue(self): instance = self._create_instance(obj=True) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) # Unrescue expects the original instance to be powered off conn.power_off(instance) xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running') conn.unrescue(instance, None) def test_unrescue_not_in_rescue(self): instance = self._create_instance(obj=True) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) # Ensure that it will not unrescue a non-rescued instance. 
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue, instance, None) def test_finish_revert_migration(self): instance = self._create_instance() class VMOpsMock(object): def __init__(self): self.finish_revert_migration_called = False def finish_revert_migration(self, context, instance, block_info, power_on): self.finish_revert_migration_called = True conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) conn._vmops = VMOpsMock() conn.finish_revert_migration(self.context, instance, None) self.assertTrue(conn._vmops.finish_revert_migration_called) def test_reboot_hard(self): instance = self._create_instance() conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) conn.reboot(self.context, instance, None, "HARD") def test_poll_rebooting_instances(self): self.mox.StubOutWithMock(compute_api.API, 'reboot') compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() instance = self._create_instance() instances = [instance] conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) conn.poll_rebooting_instances(60, instances) def test_reboot_soft(self): instance = self._create_instance() conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) conn.reboot(self.context, instance, None, "SOFT") def test_reboot_halted(self): session = get_session() instance = self._create_instance(spawn=False) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) xenapi_fake.create_vm(instance['name'], 'Halted') conn.reboot(self.context, instance, None, "SOFT") vm_ref = vm_utils.lookup(session, instance['name']) vm = xenapi_fake.get_record('VM', vm_ref) self.assertEqual(vm['power_state'], 'Running') def test_reboot_unknown_state(self): instance = self._create_instance(spawn=False) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) xenapi_fake.create_vm(instance['name'], 'Unknown') self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context, instance, None, "SOFT") def test_reboot_rescued(self): instance = self._create_instance() instance['vm_state'] = vm_states.RESCUED conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) real_result = vm_utils.lookup(conn._session, instance['name']) self.mox.StubOutWithMock(vm_utils, 'lookup') vm_utils.lookup(conn._session, instance['name'], True).AndReturn(real_result) self.mox.ReplayAll() conn.reboot(self.context, instance, None, "SOFT") def test_get_console_output_succeeds(self): def fake_get_console_output(instance): self.assertEqual("instance", instance) return "console_log" self.stubs.Set(self.conn._vmops, 'get_console_output', fake_get_console_output) self.assertEqual(self.conn.get_console_output('context', "instance"), "console_log") def _test_maintenance_mode(self, find_host, find_aggregate): real_call_xenapi = self.conn._session.call_xenapi instance = self._create_instance(spawn=True) api_calls = {} # Record all the xenapi calls, and return a fake list of hosts # for the host.get_all call def fake_call_xenapi(method, *args): api_calls[method] = args if method == 'host.get_all': return ['foo', 'bar', 'baz'] return real_call_xenapi(method, *args) self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi) def fake_aggregate_get(context, host, key): if find_aggregate: return [test_aggregate.fake_aggregate] else: return [] self.stub_out('nova.db.aggregate_get_by_host', fake_aggregate_get) def fake_host_find(context, session, src, dst): if find_host: return 'bar' else: raise exception.NoValidHost("I saw this one coming...") self.stubs.Set(host, '_host_find', fake_host_find) result = 
self.conn.host_maintenance_mode('bar', 'on_maintenance') self.assertEqual(result, 'on_maintenance') # We expect the VM.pool_migrate call to have been called to # migrate our instance to the 'bar' host vm_ref = vm_utils.lookup(self.conn._session, instance['name']) host_ref = "foo" expected = (vm_ref, host_ref, {"live": "true"}) self.assertEqual(api_calls.get('VM.pool_migrate'), expected) instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.assertEqual(instance['vm_state'], vm_states.ACTIVE) self.assertEqual(instance['task_state'], task_states.MIGRATING) def test_maintenance_mode(self): self._test_maintenance_mode(True, True) def test_maintenance_mode_no_host(self): self.assertRaises(exception.NoValidHost, self._test_maintenance_mode, False, True) def test_maintenance_mode_no_aggregate(self): self.assertRaises(exception.NotFound, self._test_maintenance_mode, True, False) def test_uuid_find(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_host') fake_inst = fake_instance.fake_db_instance(id=123) fake_inst2 = fake_instance.fake_db_instance(id=456) db.instance_get_all_by_host(self.context, fake_inst['host'], columns_to_join=None ).AndReturn([fake_inst, fake_inst2]) self.mox.ReplayAll() expected_name = CONF.instance_name_template % fake_inst['id'] inst_uuid = host._uuid_find(self.context, fake_inst['host'], expected_name) self.assertEqual(inst_uuid, fake_inst['uuid']) def test_session_virtapi(self): was = {'called': False} def fake_aggregate_get_by_host(self, *args, **kwargs): was['called'] = True raise test.TestingException() self.stub_out("nova.db.aggregate_get_by_host", fake_aggregate_get_by_host) self.stubs.Set(self.conn._session, "is_slave", True) self.assertRaises(test.TestingException, self.conn._session._get_host_uuid) self.assertTrue(was['called']) def test_session_handles_aggregate_metadata(self): def fake_aggregate_get(context, host, key): agg = copy.copy(test_aggregate.fake_aggregate) agg['metadetails'][CONF.host] = 'this_should_be_metadata' return [agg] self.stub_out('nova.db.aggregate_get_by_host', fake_aggregate_get) self.stubs.Set(self.conn._session, "is_slave", True) self.assertEqual('this_should_be_metadata', self.conn._session._get_host_uuid()) def test_per_instance_usage_running(self): instance = self._create_instance(spawn=True) flavor = objects.Flavor.get_by_id(self.context, 3) expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'], 'uuid': instance['uuid']}} actual = self.conn.get_per_instance_usage() self.assertEqual(expected, actual) # Paused instances still consume resources: self.conn.pause(instance) actual = self.conn.get_per_instance_usage() self.assertEqual(expected, actual) def test_per_instance_usage_suspended(self): # Suspended instances do not consume memory: instance = self._create_instance(spawn=True) self.conn.suspend(self.context, instance) actual = self.conn.get_per_instance_usage() self.assertEqual({}, actual) def test_per_instance_usage_halted(self): instance = self._create_instance(spawn=True, obj=True) self.conn.power_off(instance) actual = self.conn.get_per_instance_usage() self.assertEqual({}, actual) def _create_instance(self, spawn=True, obj=False, **attrs): """Creates and spawns a test instance.""" instance_values = { 'uuid': str(uuid.uuid4()), 'display_name': 'host-', 'project_id': self.project_id, 'user_id': self.user_id, 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'root_gb': 80, 'ephemeral_gb': 0, 'instance_type_id': '3', # m1.large 'os_type': 'linux', 'vm_mode': 'hvm', 'architecture': 'x86-64'} 
instance_values.update(attrs) instance = create_instance_with_system_metadata(self.context, instance_values) network_info = fake_network.fake_get_instance_nw_info(self) image_meta = objects.ImageMeta.from_dict( {'id': IMAGE_VHD, 'disk_format': 'vhd'}) if spawn: self.conn.spawn(self.context, instance, image_meta, [], 'herp', network_info) if obj: return instance return base.obj_to_primitive(instance) def test_destroy_clean_up_kernel_and_ramdisk(self): def fake_lookup_kernel_ramdisk(session, vm_ref): return "kernel", "ramdisk" self.stubs.Set(vm_utils, "lookup_kernel_ramdisk", fake_lookup_kernel_ramdisk) def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk): fake_destroy_kernel_ramdisk.called = True self.assertEqual("kernel", kernel) self.assertEqual("ramdisk", ramdisk) fake_destroy_kernel_ramdisk.called = False self.stubs.Set(vm_utils, "destroy_kernel_ramdisk", fake_destroy_kernel_ramdisk) instance = self._create_instance(spawn=True, obj=True) network_info = fake_network.fake_get_instance_nw_info(self) self.conn.destroy(self.context, instance, network_info) vm_ref = vm_utils.lookup(self.conn._session, instance['name']) self.assertIsNone(vm_ref) self.assertTrue(fake_destroy_kernel_ramdisk.called) class XenAPIDiffieHellmanTestCase(test.NoDBTestCase): """Unit tests for Diffie-Hellman code.""" def setUp(self): super(XenAPIDiffieHellmanTestCase, self).setUp() self.alice = agent.SimpleDH() self.bob = agent.SimpleDH() def test_shared(self): alice_pub = self.alice.get_public() bob_pub = self.bob.get_public() alice_shared = self.alice.compute_shared(bob_pub) bob_shared = self.bob.compute_shared(alice_pub) self.assertEqual(alice_shared, bob_shared) def _test_encryption(self, message): enc = self.alice.encrypt(message) self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEqual(dec, message) def test_encrypt_simple_message(self): self._test_encryption('This is a simple message.') def test_encrypt_message_with_newlines_at_end(self): self._test_encryption('This message has a newline at the end.\n') def test_encrypt_many_newlines_at_end(self): self._test_encryption('Message with lotsa newlines.\n\n\n') def test_encrypt_newlines_inside_message(self): self._test_encryption('Message\nwith\ninterior\nnewlines.') def test_encrypt_with_leading_newlines(self): self._test_encryption('\n\nMessage with leading newlines.') def test_encrypt_really_long_message(self): self._test_encryption(''.join(['abcd' for i in range(1024)])) # FIXME(sirp): convert this to use XenAPITestBaseNoDB class XenAPIMigrateInstance(stubs.XenAPITestBase): """Unit test for verifying migration-related actions.""" REQUIRES_LOCKING = True def setUp(self): super(XenAPIMigrateInstance, self).setUp() self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') self.flags(firewall_driver='nova.virt.xenapi.firewall.' 
'Dom0IptablesFirewallDriver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) db_fakes.stub_out_db_instance_api(self) xenapi_fake.create_network('fake', 'fake_br1') self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) self.instance_values = { 'project_id': self.project_id, 'user_id': self.user_id, 'image_ref': 1, 'kernel_id': None, 'ramdisk_id': None, 'root_gb': 80, 'ephemeral_gb': 0, 'instance_type_id': '3', # m1.large 'os_type': 'linux', 'architecture': 'x86-64'} migration_values = { 'source_compute': 'nova-compute', 'dest_compute': 'nova-compute', 'dest_host': '10.127.5.114', 'status': 'post-migrating', 'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7', 'old_instance_type_id': 5, 'new_instance_type_id': 1 } self.migration = db.migration_create( context.get_admin_context(), migration_values) fake_processutils.stub_out_processutils_execute(self.stubs) stubs.stub_out_migration_methods(self.stubs) stubs.stubout_get_this_vm_uuid(self.stubs) def fake_inject_instance_metadata(self, instance, vm): pass self.stubs.Set(vmops.VMOps, '_inject_instance_metadata', fake_inject_instance_metadata) def fake_unpause_and_wait(self, vm_ref, instance, power_on): pass self.stubs.Set(vmops.VMOps, '_unpause_and_wait', fake_unpause_and_wait) def _create_instance(self, **kw): values = self.instance_values.copy() values.update(kw) instance = objects.Instance(context=self.context, **values) instance.create() return instance def test_migrate_disk_and_power_off(self): instance = self._create_instance() xenapi_fake.create_vm(instance['name'], 'Running') flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80, ephemeral_gb=0) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vm_ref = vm_utils.lookup(conn._session, instance['name']) self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume') volume_utils.is_booted_from_volume(conn._session, vm_ref) self.mox.ReplayAll() conn.migrate_disk_and_power_off(self.context, instance, '127.0.0.1', flavor, None) def test_migrate_disk_and_power_off_passes_exceptions(self): instance = self._create_instance() xenapi_fake.create_vm(instance['name'], 'Running') flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80, ephemeral_gb=0) def fake_raise(*args, **kwargs): raise exception.MigrationError(reason='test failure') self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, conn.migrate_disk_and_power_off, self.context, instance, '127.0.0.1', flavor, None) def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self): instance = self._create_instance() flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0, ephemeral_gb=0) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.ResizeError, conn.migrate_disk_and_power_off, self.context, instance, 'fake_dest', flavor, None) def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self): flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0, ephemeral_gb=0) instance = self._create_instance(root_gb=0, ephemeral_gb=0) xenapi_fake.create_vm(instance['name'], 'Running') conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vm_ref = vm_utils.lookup(conn._session, instance['name']) self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume') volume_utils.is_booted_from_volume(conn._session, vm_ref) self.mox.ReplayAll() 
conn.migrate_disk_and_power_off(self.context, instance, '127.0.0.1', flavor, None) def _test_revert_migrate(self, power_on): instance = create_instance_with_system_metadata(self.context, self.instance_values) self.called = False self.fake_vm_start_called = False self.fake_finish_revert_migration_called = False context = 'fake_context' def fake_vm_start(*args, **kwargs): self.fake_vm_start_called = True def fake_vdi_resize(*args, **kwargs): self.called = True def fake_finish_revert_migration(*args, **kwargs): self.fake_finish_revert_migration_called = True self.stubs.Set(stubs.FakeSessionForVMTests, "VDI_resize_online", fake_vdi_resize) self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) self.stubs.Set(vmops.VMOps, 'finish_revert_migration', fake_finish_revert_migration) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, product_version=(4, 0, 0), product_brand='XenServer') self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume') conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) network_info = fake_network.fake_get_instance_nw_info(self) image_meta = objects.ImageMeta.from_dict( {'id': instance['image_ref'], 'disk_format': 'vhd'}) base = xenapi_fake.create_vdi('hurr', 'fake') base_uuid = xenapi_fake.get_record('VDI', base)['uuid'] cow = xenapi_fake.create_vdi('durr', 'fake') cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid'] conn.finish_migration(self.context, self.migration, instance, dict(base_copy=base_uuid, cow=cow_uuid), network_info, image_meta, resize_instance=True, block_device_info=None, power_on=power_on) self.assertTrue(self.called) self.assertEqual(self.fake_vm_start_called, power_on) conn.finish_revert_migration(context, instance, network_info) self.assertTrue(self.fake_finish_revert_migration_called) def test_revert_migrate_power_on(self): self._test_revert_migrate(True) def test_revert_migrate_power_off(self): self._test_revert_migrate(False) def _test_finish_migrate(self, power_on): instance = create_instance_with_system_metadata(self.context, self.instance_values) self.called = False self.fake_vm_start_called = False def fake_vm_start(*args, **kwargs): self.fake_vm_start_called = True def fake_vdi_resize(*args, **kwargs): self.called = True self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) self.stubs.Set(stubs.FakeSessionForVMTests, "VDI_resize_online", fake_vdi_resize) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, product_version=(4, 0, 0), product_brand='XenServer') conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) network_info = fake_network.fake_get_instance_nw_info(self) image_meta = objects.ImageMeta.from_dict( {'id': instance['image_ref'], 'disk_format': 'vhd'}) conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), network_info, image_meta, resize_instance=True, block_device_info=None, power_on=power_on) self.assertTrue(self.called) self.assertEqual(self.fake_vm_start_called, power_on) def test_finish_migrate_power_on(self): self._test_finish_migrate(True) def test_finish_migrate_power_off(self): self._test_finish_migrate(False) def test_finish_migrate_no_local_storage(self): values = copy.copy(self.instance_values) values["root_gb"] = 0 values["ephemeral_gb"] = 0 instance = create_instance_with_system_metadata(self.context, values) def fake_vdi_resize(*args, **kwargs): raise Exception("This shouldn't be called") self.stubs.Set(stubs.FakeSessionForVMTests, "VDI_resize_online", fake_vdi_resize) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) network_info 
= fake_network.fake_get_instance_nw_info(self) image_meta = objects.ImageMeta.from_dict( {'id': instance['image_ref'], 'disk_format': 'vhd'}) conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), network_info, image_meta, resize_instance=True) def test_finish_migrate_no_resize_vdi(self): instance = create_instance_with_system_metadata(self.context, self.instance_values) def fake_vdi_resize(*args, **kwargs): raise Exception("This shouldn't be called") self.stubs.Set(stubs.FakeSessionForVMTests, "VDI_resize_online", fake_vdi_resize) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) network_info = fake_network.fake_get_instance_nw_info(self) # Resize instance would be determined by the compute call image_meta = objects.ImageMeta.from_dict( {'id': instance['image_ref'], 'disk_format': 'vhd'}) conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), network_info, image_meta, resize_instance=False) @stub_vm_utils_with_vdi_attached_here def test_migrate_too_many_partitions_no_resize_down(self): instance = self._create_instance() xenapi_fake.create_vm(instance['name'], 'Running') flavor = db.flavor_get_by_name(self.context, 'm1.small') flavor = fake_flavor.fake_flavor_obj(self.context, **flavor) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) def fake_get_partitions(partition): return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")] self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions) self.assertRaises(exception.InstanceFaultRollback, conn.migrate_disk_and_power_off, self.context, instance, '127.0.0.1', flavor, None) @stub_vm_utils_with_vdi_attached_here def test_migrate_bad_fs_type_no_resize_down(self): instance = self._create_instance() xenapi_fake.create_vm(instance['name'], 'Running') flavor = db.flavor_get_by_name(self.context, 'm1.small') flavor = fake_flavor.fake_flavor_obj(self.context, **flavor) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) def fake_get_partitions(partition): return [(1, 2, 3, "ext2", "", "boot")] self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions) self.assertRaises(exception.InstanceFaultRollback, conn.migrate_disk_and_power_off, self.context, instance, '127.0.0.1', flavor, None) def test_migrate_rollback_when_resize_down_fs_fails(self): conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vmops = conn._vmops self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown') self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label') self.mox.StubOutWithMock(vm_utils, 'resize_disk') self.mox.StubOutWithMock(vm_utils, 'migrate_vhd') self.mox.StubOutWithMock(vm_utils, 'destroy_vdi') self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely') self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan') instance = objects.Instance(context=self.context, auto_disk_config=True, uuid='uuid') instance.obj_reset_changes() vm_ref = "vm_ref" dest = "dest" flavor = "type" sr_path = "sr_path" vmops._resize_ensure_vm_is_shutdown(instance, vm_ref) vmops._apply_orig_vm_name_label(instance, vm_ref) old_vdi_ref = "old_ref" vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn( (old_vdi_ref, None)) new_vdi_ref = "new_ref" new_vdi_uuid = "new_uuid" vm_utils.resize_disk(vmops._session, instance, old_vdi_ref, flavor).AndReturn((new_vdi_ref, new_vdi_uuid)) vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest, sr_path, 0).AndRaise( exception.ResizeError(reason="asdf")) vm_utils.destroy_vdi(vmops._session, new_vdi_ref) 
vmops._restore_orig_vm_and_cleanup_orphan(instance) self.mox.ReplayAll() with mock.patch.object(instance, 'save') as mock_save: self.assertRaises(exception.InstanceFaultRollback, vmops._migrate_disk_resizing_down, self.context, instance, dest, flavor, vm_ref, sr_path) self.assertEqual(3, mock_save.call_count) self.assertEqual(60.0, instance.progress) def test_resize_ensure_vm_is_shutdown_cleanly(self): conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vmops = conn._vmops fake_instance = {'uuid': 'uuid'} self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) vm_utils.clean_shutdown_vm(vmops._session, fake_instance, "ref").AndReturn(True) self.mox.ReplayAll() vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") def test_resize_ensure_vm_is_shutdown_forced(self): conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vmops = conn._vmops fake_instance = {'uuid': 'uuid'} self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) vm_utils.clean_shutdown_vm(vmops._session, fake_instance, "ref").AndReturn(False) vm_utils.hard_shutdown_vm(vmops._session, fake_instance, "ref").AndReturn(True) self.mox.ReplayAll() vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") def test_resize_ensure_vm_is_shutdown_fails(self): conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vmops = conn._vmops fake_instance = {'uuid': 'uuid'} self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) vm_utils.clean_shutdown_vm(vmops._session, fake_instance, "ref").AndReturn(False) vm_utils.hard_shutdown_vm(vmops._session, fake_instance, "ref").AndReturn(False) self.mox.ReplayAll() self.assertRaises(exception.ResizeError, vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref") def test_resize_ensure_vm_is_shutdown_already_shutdown(self): conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vmops = conn._vmops fake_instance = {'uuid': 'uuid'} self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True) self.mox.ReplayAll() vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") class XenAPIImageTypeTestCase(test.NoDBTestCase): """Test ImageType class.""" def test_to_string(self): # Can convert from type id to type string. 
self.assertEqual( vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL), vm_utils.ImageType.KERNEL_STR) def _assert_role(self, expected_role, image_type_id): self.assertEqual( expected_role, vm_utils.ImageType.get_role(image_type_id)) def test_get_image_role_kernel(self): self._assert_role('kernel', vm_utils.ImageType.KERNEL) def test_get_image_role_ramdisk(self): self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK) def test_get_image_role_disk(self): self._assert_role('root', vm_utils.ImageType.DISK) def test_get_image_role_disk_raw(self): self._assert_role('root', vm_utils.ImageType.DISK_RAW) def test_get_image_role_disk_vhd(self): self._assert_role('root', vm_utils.ImageType.DISK_VHD) class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase): """Unit tests for code that detects the ImageType.""" def assert_disk_type(self, image_meta, expected_disk_type): actual = vm_utils.determine_disk_image_type(image_meta) self.assertEqual(expected_disk_type, actual) def test_machine(self): image_meta = objects.ImageMeta.from_dict( {'disk_format': 'ami'}) self.assert_disk_type(image_meta, vm_utils.ImageType.DISK) def test_raw(self): image_meta = objects.ImageMeta.from_dict( {'disk_format': 'raw'}) self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW) def test_vhd(self): image_meta = objects.ImageMeta.from_dict( {'disk_format': 'vhd'}) self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD) # FIXME(sirp): convert this to use XenAPITestBaseNoDB class XenAPIHostTestCase(stubs.XenAPITestBase): """Tests HostState, which holds metrics from XenServer that get reported back to the Schedulers. """ def setUp(self): super(XenAPIHostTestCase, self).setUp() self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.context = context.get_admin_context() self.flags(use_local=True, group='conductor') self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.instance = fake_instance.fake_db_instance(name='foo') def test_host_state(self): stats = self.conn.host_state.get_host_stats(False) # Values from fake.create_local_srs (ext SR) self.assertEqual(stats['disk_total'], 40000) self.assertEqual(stats['disk_used'], 20000) # Values from fake._plugin_xenhost_host_data self.assertEqual(stats['host_memory_total'], 10) self.assertEqual(stats['host_memory_overhead'], 20) self.assertEqual(stats['host_memory_free'], 30) self.assertEqual(stats['host_memory_free_computed'], 40) self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost') self.assertEqual(stats['host_cpu_info']['cpu_count'], 4) self.assertThat({ 'vendor': 'GenuineIntel', 'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz', 'topology': { 'sockets': 1, 'cores': 4, 'threads': 1, }, 'features': [ 'fpu', 'de', 'tsc', 'msr', 'pae', 'mce', 'cx8', 'apic', 'sep', 'mtrr', 'mca', 'cmov', 'pat', 'clflush', 'acpi', 'mmx', 'fxsr', 'sse', 'sse2', 'ss', 'ht', 'nx', 'constant_tsc', 'nonstop_tsc', 'aperfmperf', 'pni', 'vmx', 'est', 'ssse3', 'sse4_1', 'sse4_2', 'popcnt', 'hypervisor', 'ida', 'tpr_shadow', 'vnmi', 'flexpriority', 'ept', 'vpid', ]}, matchers.DictMatches(stats['cpu_model'])) # No VMs running self.assertEqual(stats['vcpus_used'], 0) def test_host_state_vcpus_used(self): stats = self.conn.host_state.get_host_stats(True) self.assertEqual(stats['vcpus_used'], 0) xenapi_fake.create_vm(self.instance['name'], 'Running') stats = self.conn.host_state.get_host_stats(True) self.assertEqual(stats['vcpus_used'], 4) def test_pci_passthrough_devices(self): 
stats = self.conn.host_state.get_host_stats(False) self.assertEqual(len(stats['pci_passthrough_devices']), 2) def test_host_state_missing_sr(self): # Must trigger construction of 'host_state' property # before introducing the stub which raises the error hs = self.conn.host_state def fake_safe_find_sr(session): raise exception.StorageRepositoryNotFound('not there') self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr) self.assertRaises(exception.StorageRepositoryNotFound, hs.get_host_stats, refresh=True) def _test_host_action(self, method, action, expected=None): result = method('host', action) if not expected: expected = action self.assertEqual(result, expected) def _test_host_action_no_param(self, method, action, expected=None): result = method(action) if not expected: expected = action self.assertEqual(result, expected) def test_host_reboot(self): self._test_host_action_no_param(self.conn.host_power_action, 'reboot') def test_host_shutdown(self): self._test_host_action_no_param(self.conn.host_power_action, 'shutdown') def test_host_startup(self): self.assertRaises(NotImplementedError, self.conn.host_power_action, 'startup') def test_host_maintenance_on(self): self._test_host_action(self.conn.host_maintenance_mode, True, 'on_maintenance') def test_host_maintenance_off(self): self._test_host_action(self.conn.host_maintenance_mode, False, 'off_maintenance') def test_set_enable_host_enable(self): _create_service_entries(self.context, values={'nova': ['fake-mini']}) self._test_host_action_no_param(self.conn.set_host_enabled, True, 'enabled') service = db.service_get_by_host_and_binary(self.context, 'fake-mini', 'nova-compute') self.assertFalse(service.disabled) def test_set_enable_host_disable(self): _create_service_entries(self.context, values={'nova': ['fake-mini']}) self._test_host_action_no_param(self.conn.set_host_enabled, False, 'disabled') service = db.service_get_by_host_and_binary(self.context, 'fake-mini', 'nova-compute') self.assertTrue(service.disabled) def test_get_host_uptime(self): result = self.conn.get_host_uptime() self.assertEqual(result, 'fake uptime') def test_supported_instances_is_included_in_host_state(self): stats = self.conn.host_state.get_host_stats(False) self.assertIn('supported_instances', stats) def test_supported_instances_is_calculated_by_to_supported_instances(self): def to_supported_instances(somedata): return "SOMERETURNVALUE" self.stubs.Set(host, 'to_supported_instances', to_supported_instances) stats = self.conn.host_state.get_host_stats(False) self.assertEqual("SOMERETURNVALUE", stats['supported_instances']) def test_update_stats_caches_hostname(self): self.mox.StubOutWithMock(host, 'call_xenhost') self.mox.StubOutWithMock(vm_utils, 'scan_default_sr') self.mox.StubOutWithMock(vm_utils, 'list_vms') self.mox.StubOutWithMock(self.conn._session, 'call_xenapi') data = {'disk_total': 0, 'disk_used': 0, 'disk_available': 0, 'supported_instances': 0, 'host_capabilities': [], 'host_hostname': 'foo', 'vcpus_used': 0, } sr_rec = { 'physical_size': 0, 'physical_utilisation': 0, 'virtual_allocation': 0, } for i in range(3): host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data) vm_utils.scan_default_sr(self.conn._session).AndReturn("ref") vm_utils.list_vms(self.conn._session).AndReturn([]) self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn( sr_rec) if i == 2: # On the third call (the second below) change the hostname data = dict(data, host_hostname='bar') self.mox.ReplayAll() stats = self.conn.host_state.get_host_stats(refresh=True) 
        self.assertEqual('foo', stats['hypervisor_hostname'])
        stats = self.conn.host_state.get_host_stats(refresh=True)
        self.assertEqual('foo', stats['hypervisor_hostname'])


class ToSupportedInstancesTestCase(test.NoDBTestCase):

    def test_default_return_value(self):
        self.assertEqual([], host.to_supported_instances(None))

    def test_return_value(self):
        self.assertEqual([(arch.X86_64, hv_type.XEN, 'xen')],
                         host.to_supported_instances([u'xen-3.0-x86_64']))

    def test_invalid_values_do_not_break(self):
        self.assertEqual([(arch.X86_64, hv_type.XEN, 'xen')],
                         host.to_supported_instances([u'xen-3.0-x86_64',
                                                      'spam']))

    def test_multiple_values(self):
        self.assertEqual(
            [
                (arch.X86_64, hv_type.XEN, 'xen'),
                (arch.I686, hv_type.XEN, 'hvm')
            ],
            host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
        )


# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
    def setUp(self):
        super(XenAPIAutoDiskConfigTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False):
            pass

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertIsPartitionCalled(self, called):
        marker = {"partition_called": False}

        def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
                                    flags):
            marker["partition_called"] = True
        self.stubs.Set(vm_utils, "_resize_part_and_fs",
                       fake_resize_part_and_fs)

        context.RequestContext(self.user_id, self.project_id)
        session = get_session()

        disk_image_type = vm_utils.ImageType.DISK_VHD
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')

        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}

        image_meta = objects.ImageMeta.from_dict(
            {'id': 'null',
             'disk_format': 'vhd',
             'properties': {'vm_mode': 'xen'}})
        self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
                                       instance['name'], vdis,
                                       disk_image_type, "fake_nw_inf")

        self.assertEqual(marker["partition_called"], called)

    def test_instance_not_auto_disk_config(self):
        """Should not partition unless instance is marked as
        auto_disk_config.
        """
        self.instance_values['auto_disk_config'] = False
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_two_partitions(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", ""),
                    (2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions", fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions", fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(1, 100, 200, 'asdf', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions", fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_passes_fail_safes(self):
        """Should partition if instance is marked as auto_disk_config=True
        and virt-layer specific fail-safe checks pass.
        """
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", "boot")]
        self.stubs.Set(vm_utils, "_get_partitions", fake_get_partitions)

        self.assertIsPartitionCalled(True)


# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
    """Test generating of local disks, like swap and ephemeral."""
    def setUp(self):
        super(XenAPIGenerateLocal, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False, empty=False, unpluggable=True):
            return session.call_xenapi('VBD.create', {'VM': vm_ref,
                                                      'VDI': vdi_ref})

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertCalled(self, instance,
                     disk_image_type=vm_utils.ImageType.DISK_VHD):
        context.RequestContext(self.user_id, self.project_id)
        session = get_session()

        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')

        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']

        vdi_key = 'root'
        if disk_image_type == vm_utils.ImageType.DISK_ISO:
            vdi_key = 'iso'
        vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
        self.called = False

        image_meta = objects.ImageMeta.from_dict(
            {'id': 'null',
             'disk_format': 'vhd',
             'properties': {'vm_mode': 'xen'}})
        self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
                                       instance['name'], vdis,
                                       disk_image_type, "fake_nw_inf")

        self.assertTrue(self.called)

    def test_generate_swap(self):
        # Test swap disk generation.
        instance_values = dict(self.instance_values, instance_type_id=5)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_swap(*args, **kwargs):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)

        self.assertCalled(instance)

    def test_generate_ephemeral(self):
        # Test ephemeral disk generation.
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_ephemeral(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_ephemeral',
                       fake_generate_ephemeral)

        self.assertCalled(instance)

    def test_generate_iso_blank_root_disk(self):
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance_values.pop('kernel_id')
        instance_values.pop('ramdisk_id')
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_ephemeral(*args):
            pass
        self.stubs.Set(vm_utils, 'generate_ephemeral',
                       fake_generate_ephemeral)

        def fake_generate_iso(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
                       fake_generate_iso)

        self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)


class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
    FAKE_VMS = {'test1:ref': dict(name_label='test1',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='12',
                                  _vifmap={'0': "a:b:c:d...",
                                           '1': "e:f:12:q..."}),
                'test2:ref': dict(name_label='test2',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='42',
                                  _vifmap={'0': "a:3:c:d...",
                                           '1': "e:f:42:q..."}),
                }

    def setUp(self):
        super(XenAPIBWCountersTestCase, self).setUp()
        self.stubs.Set(vm_utils, 'list_vms',
                       XenAPIBWCountersTestCase._fake_list_vms)
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def _fake_get_vif_device_map(vm_rec):
            return vm_rec['_vifmap']

        self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
                       _fake_get_vif_device_map)

    @classmethod
    def _fake_list_vms(cls, session):
        return six.iteritems(cls.FAKE_VMS)

    @staticmethod
    def _fake_fetch_bandwidth_mt(session):
        return {}

    @staticmethod
    def _fake_fetch_bandwidth(session):
        return {'42': {'0': {'bw_in': 21024, 'bw_out': 22048},
                       '1': {'bw_in': 231337, 'bw_out': 221212121}},
                '12': {'0': {'bw_in': 1024, 'bw_out': 2048},
                       '1': {'bw_in': 31337, 'bw_out': 21212121}},
                }

    def test_get_all_bw_counters(self):
        instances = [dict(name='test1', uuid='1-2-3'),
                     dict(name='test2', uuid='4-5-6')]
        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       self._fake_fetch_bandwidth)
        result = self.conn.get_all_bw_counters(instances)
        self.assertEqual(len(result), 4)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="a:b:c:d...",
                           bw_in=1024,
                           bw_out=2048), result)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="e:f:12:q...",
                           bw_in=31337,
                           bw_out=21212121), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="a:3:c:d...",
                           bw_in=21024,
                           bw_out=22048), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="e:f:42:q...",
                           bw_in=231337,
                           bw_out=221212121), result)

    def test_get_all_bw_counters_in_failure_case(self):
        """Test that get_all_bw_counters returns an empty list when
        no data returned from XenServer. c.f. bug #910045.
""" instances = [dict(name='instance-0001', uuid='1-2-3-4-5')] self.stubs.Set(vm_utils, 'fetch_bandwidth', self._fake_fetch_bandwidth_mt) result = self.conn.get_all_bw_counters(instances) self.assertEqual(result, []) # TODO(salvatore-orlando): this class and # nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase # share a lot of code. Consider abstracting common code in a base # class for firewall driver testing. # # FIXME(sirp): convert this to use XenAPITestBaseNoDB class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase): REQUIRES_LOCKING = True _in_rules = [ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', '*nat', ':PREROUTING ACCEPT [1170:189210]', ':INPUT ACCEPT [844:71028]', ':OUTPUT ACCEPT [5149:405186]', ':POSTROUTING ACCEPT [5063:386098]', '# Completed on Mon Dec 6 11:54:13 2010', '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', '*mangle', ':INPUT ACCEPT [969615:281627771]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [915599:63811649]', ':nova-block-ipv4 - [0:0]', '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' ',ESTABLISHED -j ACCEPT ', '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', '[0:0] -A FORWARD -o virbr0 -j REJECT ' '--reject-with icmp-port-unreachable ', '[0:0] -A FORWARD -i virbr0 -j REJECT ' '--reject-with icmp-port-unreachable ', 'COMMIT', '# Completed on Mon Dec 6 11:54:13 2010', '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', '*filter', ':INPUT ACCEPT [969615:281627771]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [915599:63811649]', ':nova-block-ipv4 - [0:0]', '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' ',ESTABLISHED -j ACCEPT ', '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', '[0:0] -A FORWARD -o virbr0 -j REJECT ' '--reject-with icmp-port-unreachable ', '[0:0] -A FORWARD -i virbr0 -j REJECT ' '--reject-with icmp-port-unreachable ', 'COMMIT', '# Completed on Mon Dec 6 11:54:13 2010', ] _in6_filter_rules = [ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', '*filter', ':INPUT ACCEPT [349155:75810423]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [349256:75777230]', 'COMMIT', '# Completed on Tue Jan 18 23:47:56 2011', ] def setUp(self): super(XenAPIDom0IptablesFirewallTestCase, self).setUp() self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 
                                   'Dom0IptablesFirewallDriver')
        self.user_id = 'mappin'
        self.project_id = 'fake'
        stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
                              test_case=self)
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = importutils.import_object(CONF.network_manager)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.fw = self.conn._vmops.firewall_driver

    def _create_instance_ref(self):
        return db.instance_create(self.context,
                                  {'user_id': self.user_id,
                                   'project_id': self.project_id,
                                   'instance_type_id': 1})

    def _create_test_security_group(self):
        admin_ctxt = context.get_admin_context()
        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': self.user_id,
                                             'project_id': self.project_id,
                                             'name': 'testgroup',
                                             'description': 'test group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})
        return secgroup

    def _validate_security_group(self):
        in_rules = filter(lambda l: not l.startswith('#'), self._in_rules)
        for rule in in_rules:
            if 'nova' not in rule:
                self.assertIn(rule, self._out_rules,
                              'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")
        security_group_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
                           ' -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
                           ' --icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
                           ' -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")

    def test_static_filters(self):
        instance_ref = self._create_instance_ref()
        src_instance_ref = self._create_instance_ref()
        admin_ctxt = context.get_admin_context()
        secgroup = self._create_test_security_group()

        src_secgroup = db.security_group_create(admin_ctxt,
                                                {'user_id': self.user_id,
                                                 'project_id': self.project_id,
                                                 'name': 'testsourcegroup',
                                                 'description': 'src group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'group_id': src_secgroup['id']})

        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
                                       src_secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])

        network_model = fake_network.fake_get_instance_nw_info(self, 1)

        from nova.compute import utils as compute_utils  # noqa
        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                       lambda instance: network_model)

        self.fw.prepare_instance_filter(instance_ref, network_model)
        self.fw.apply_instance_filter(instance_ref, network_model)

        self._validate_security_group()
        # Extra test for TCP acceptance rules
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
                               ' --dport 80:81 -s %s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                            "TCP port 80/81 acceptance rule wasn't added")

        db.instance_destroy(admin_ctxt, instance_ref['uuid'])

    def test_filters_for_instance_with_ip_v6(self):
        self.flags(use_ipv6=True)
        network_info = fake_network.fake_get_instance_nw_info(self, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 1)

    def test_filters_for_instance_without_ip_v6(self):
        self.flags(use_ipv6=False)
        network_info = fake_network.fake_get_instance_nw_info(self, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 0)

    def test_multinic_iptables(self):
        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        _get_instance_nw_info = fake_network.fake_get_instance_nw_info
        network_info = _get_instance_nw_info(self,
                                             networks_count,
                                             ipv4_addr_per_network)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        # Extra rules are for the DHCP request
        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
                 networks_count) + 2
        self.assertEqual(ipv4_network_rules, rules)
        self.assertEqual(ipv6_network_rules,
                         ipv6_rules_per_addr * ipv6_addr_per_network *
                         networks_count)

    def test_do_refresh_security_group_rules(self):
        admin_ctxt = context.get_admin_context()
        instance_ref = self._create_instance_ref()
        network_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
        secgroup = self._create_test_security_group()
        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.instance_info[instance_ref['id']] = (instance_ref,
                                                     network_info)
        self._validate_security_group()
        # add a rule to the security group
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'udp',
                                       'from_port': 200,
                                       'to_port': 299,
                                       'cidr': '192.168.99.0/24'})
        # validate the extra rule
        self.fw.refresh_security_group_rules(secgroup)
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
                           ' -s 192.168.99.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "Rules were not updated properly. "
                        "The rule for UDP acceptance is missing")


class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for testing we find the right SR."""
    def test_safe_find_sr_raise_exception(self):
        # Ensure StorageRepositoryNotFound is raised when wrong filter.
self.flags(sr_matching_filter='yadayadayada', group='xenserver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) session = get_session() self.assertRaises(exception.StorageRepositoryNotFound, vm_utils.safe_find_sr, session) def test_safe_find_sr_local_storage(self): # Ensure the default local-storage is found. self.flags(sr_matching_filter='other-config:i18n-key=local-storage', group='xenserver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) session = get_session() # This test is only guaranteed if there is one host in the pool self.assertEqual(len(xenapi_fake.get_all('host')), 1) host_ref = xenapi_fake.get_all('host')[0] pbd_refs = xenapi_fake.get_all('PBD') for pbd_ref in pbd_refs: pbd_rec = xenapi_fake.get_record('PBD', pbd_ref) if pbd_rec['host'] != host_ref: continue sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR']) if sr_rec['other_config']['i18n-key'] == 'local-storage': local_sr = pbd_rec['SR'] expected = vm_utils.safe_find_sr(session) self.assertEqual(local_sr, expected) def test_safe_find_sr_by_other_criteria(self): # Ensure the SR is found when using a different filter. self.flags(sr_matching_filter='other-config:my_fake_sr=true', group='xenserver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) session = get_session() host_ref = xenapi_fake.get_all('host')[0] local_sr = xenapi_fake.create_sr(name_label='Fake Storage', type='lvm', other_config={'my_fake_sr': 'true'}, host_ref=host_ref) expected = vm_utils.safe_find_sr(session) self.assertEqual(local_sr, expected) def test_safe_find_sr_default(self): # Ensure the default SR is found regardless of other-config. self.flags(sr_matching_filter='default-sr:true', group='xenserver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) session = get_session() pool_ref = session.call_xenapi('pool.get_all')[0] expected = vm_utils.safe_find_sr(session) self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref), expected) def _create_service_entries(context, values={'avail_zone1': ['fake_host1', 'fake_host2'], 'avail_zone2': ['fake_host3'], }): for avail_zone, hosts in six.iteritems(values): for service_host in hosts: db.service_create(context, {'host': service_host, 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0}) return values # FIXME(sirp): convert this to use XenAPITestBaseNoDB class XenAPIAggregateTestCase(stubs.XenAPITestBase): """Unit tests for aggregate operations.""" def setUp(self): super(XenAPIAggregateTestCase, self).setUp() self.flags(connection_url='http://test_url', connection_username='test_user', connection_password='test_pass', group='xenserver') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 
'Dom0IptablesFirewallDriver', host='host', compute_driver='xenapi.XenAPIDriver', default_availability_zone='avail_zone1') self.flags(use_local=True, group='conductor') host_ref = xenapi_fake.get_all('host')[0] stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.context = context.get_admin_context() self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.compute = importutils.import_object(CONF.compute_manager) self.api = compute_api.AggregateAPI() values = {'name': 'test_aggr', 'metadata': {'availability_zone': 'test_zone', pool_states.POOL_FLAG: 'XenAPI'}} self.aggr = objects.Aggregate(context=self.context, id=1, **values) self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', 'master_compute': 'host', 'availability_zone': 'fake_zone', pool_states.KEY: pool_states.ACTIVE, 'host': xenapi_fake.get_record('host', host_ref)['uuid']} def test_pool_add_to_aggregate_called_by_driver(self): calls = [] def pool_add_to_aggregate(context, aggregate, host, slave_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) self.assertEqual("SLAVEINFO", slave_info) calls.append(pool_add_to_aggregate) self.stubs.Set(self.conn._pool, "add_to_aggregate", pool_add_to_aggregate) self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", slave_info="SLAVEINFO") self.assertIn(pool_add_to_aggregate, calls) def test_pool_remove_from_aggregate_called_by_driver(self): calls = [] def pool_remove_from_aggregate(context, aggregate, host, slave_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) self.assertEqual("SLAVEINFO", slave_info) calls.append(pool_remove_from_aggregate) self.stubs.Set(self.conn._pool, "remove_from_aggregate", pool_remove_from_aggregate) self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST", slave_info="SLAVEINFO") self.assertIn(pool_remove_from_aggregate, calls) def test_add_to_aggregate_for_first_host_sets_metadata(self): def fake_init_pool(id, name): fake_init_pool.called = True self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool) aggregate = self._aggregate_setup() self.conn._pool.add_to_aggregate(self.context, aggregate, "host") result = db.aggregate_get(self.context, aggregate.id) self.assertTrue(fake_init_pool.called) self.assertThat(self.fake_metadata, matchers.DictMatches(result['metadetails'])) def test_join_slave(self): # Ensure join_slave gets called when the request gets to master. 
def fake_join_slave(id, compute_uuid, host, url, user, password): fake_join_slave.called = True self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave) aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) self.conn._pool.add_to_aggregate(self.context, aggregate, "host2", dict(compute_uuid='fake_uuid', url='fake_url', user='fake_user', passwd='fake_pass', xenhost_uuid='fake_uuid')) self.assertTrue(fake_join_slave.called) def test_add_to_aggregate_first_host(self): def fake_pool_set_name_label(self, session, pool_ref, name): fake_pool_set_name_label.called = True self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label", fake_pool_set_name_label) self.conn._session.call_xenapi("pool.create", {"name": "asdf"}) metadata = {'availability_zone': 'fake_zone', pool_states.POOL_FLAG: "XenAPI", pool_states.KEY: pool_states.CREATED} aggregate = objects.Aggregate(context=self.context) aggregate.name = 'fake_aggregate' aggregate.metadata = dict(metadata) aggregate.create() aggregate.add_host('host') self.assertEqual(["host"], aggregate.hosts) self.assertEqual(metadata, aggregate.metadata) self.conn._pool.add_to_aggregate(self.context, aggregate, "host") self.assertTrue(fake_pool_set_name_label.called) def test_remove_from_aggregate_called(self): def fake_remove_from_aggregate(context, aggregate, host): fake_remove_from_aggregate.called = True self.stubs.Set(self.conn._pool, "remove_from_aggregate", fake_remove_from_aggregate) self.conn.remove_from_aggregate(None, None, None) self.assertTrue(fake_remove_from_aggregate.called) def test_remove_from_empty_aggregate(self): result = self._aggregate_setup() self.assertRaises(exception.InvalidAggregateActionDelete, self.conn._pool.remove_from_aggregate, self.context, result, "test_host") def test_remove_slave(self): # Ensure eject slave gets called. def fake_eject_slave(id, compute_uuid, host_uuid): fake_eject_slave.called = True self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave) self.fake_metadata['host2'] = 'fake_host2_uuid' aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") self.assertTrue(fake_eject_slave.called) def test_remove_master_solo(self): # Ensure metadata are cleared after removal. def fake_clear_pool(id): fake_clear_pool.called = True self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool) aggregate = self._aggregate_setup(metadata=self.fake_metadata) self.conn._pool.remove_from_aggregate(self.context, aggregate, "host") result = db.aggregate_get(self.context, aggregate.id) self.assertTrue(fake_clear_pool.called) self.assertThat({'availability_zone': 'fake_zone', pool_states.POOL_FLAG: 'XenAPI', pool_states.KEY: pool_states.ACTIVE}, matchers.DictMatches(result['metadetails'])) def test_remote_master_non_empty_pool(self): # Ensure AggregateError is raised if removing the master. 
aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) self.assertRaises(exception.InvalidAggregateActionDelete, self.conn._pool.remove_from_aggregate, self.context, aggregate, "host") def _aggregate_setup(self, aggr_name='fake_aggregate', aggr_zone='fake_zone', aggr_state=pool_states.CREATED, hosts=['host'], metadata=None): aggregate = objects.Aggregate(context=self.context) aggregate.name = aggr_name aggregate.metadata = {'availability_zone': aggr_zone, pool_states.POOL_FLAG: 'XenAPI', pool_states.KEY: aggr_state, } if metadata: aggregate.metadata.update(metadata) aggregate.create() for aggregate_host in hosts: aggregate.add_host(aggregate_host) return aggregate def test_add_host_to_aggregate_invalid_changing_status(self): """Ensure InvalidAggregateActionAdd is raised when adding host while aggregate is not ready. """ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) ex = self.assertRaises(exception.InvalidAggregateActionAdd, self.conn.add_to_aggregate, self.context, aggregate, 'host') self.assertIn('setup in progress', str(ex)) def test_add_host_to_aggregate_invalid_dismissed_status(self): """Ensure InvalidAggregateActionAdd is raised when aggregate is deleted. """ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) ex = self.assertRaises(exception.InvalidAggregateActionAdd, self.conn.add_to_aggregate, self.context, aggregate, 'fake_host') self.assertIn('aggregate deleted', str(ex)) def test_add_host_to_aggregate_invalid_error_status(self): """Ensure InvalidAggregateActionAdd is raised when aggregate is in error. """ aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR) ex = self.assertRaises(exception.InvalidAggregateActionAdd, self.conn.add_to_aggregate, self.context, aggregate, 'fake_host') self.assertIn('aggregate in error', str(ex)) def test_remove_host_from_aggregate_error(self): # Ensure we can remove a host from an aggregate even if in error. values = _create_service_entries(self.context) fake_zone = list(values.keys())[0] aggr = self.api.create_aggregate(self.context, 'fake_aggregate', fake_zone) # let's mock the fact that the aggregate is ready! metadata = {pool_states.POOL_FLAG: "XenAPI", pool_states.KEY: pool_states.ACTIVE} db.aggregate_metadata_add(self.context, aggr.id, metadata) for aggregate_host in values[fake_zone]: aggr = self.api.add_host_to_aggregate(self.context, aggr.id, aggregate_host) # let's mock the fact that the aggregate is in error! expected = self.api.remove_host_from_aggregate(self.context, aggr.id, values[fake_zone][0]) self.assertEqual(len(aggr.hosts) - 1, len(expected.hosts)) self.assertEqual(expected.metadata[pool_states.KEY], pool_states.ACTIVE) def test_remove_host_from_aggregate_invalid_dismissed_status(self): """Ensure InvalidAggregateActionDelete is raised when aggregate is deleted. """ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) self.assertRaises(exception.InvalidAggregateActionDelete, self.conn.remove_from_aggregate, self.context, aggregate, 'fake_host') def test_remove_host_from_aggregate_invalid_changing_status(self): """Ensure InvalidAggregateActionDelete is raised when aggregate is changing. """ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) self.assertRaises(exception.InvalidAggregateActionDelete, self.conn.remove_from_aggregate, self.context, aggregate, 'fake_host') def test_add_aggregate_host_raise_err(self): # Ensure the undo operation works correctly on add. 
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): raise exception.AggregateError( aggregate_id='', action='', reason='') self.stubs.Set(self.compute.driver, "add_to_aggregate", fake_driver_add_to_aggregate) metadata = {pool_states.POOL_FLAG: "XenAPI", pool_states.KEY: pool_states.ACTIVE} self.aggr.metadata = metadata self.aggr.hosts = ['fake_host'] self.assertRaises(exception.AggregateError, self.compute.add_aggregate_host, self.context, host="fake_host", aggregate=self.aggr, slave_info=None) self.assertEqual(self.aggr.metadata[pool_states.KEY], pool_states.ERROR) self.assertEqual(self.aggr.hosts, ['fake_host']) class MockComputeAPI(object): def __init__(self): self._mock_calls = [] def add_aggregate_host(self, ctxt, aggregate, host_param, host, slave_info): self._mock_calls.append(( self.add_aggregate_host, ctxt, aggregate, host_param, host, slave_info)) def remove_aggregate_host(self, ctxt, aggregate_id, host_param, host, slave_info): self._mock_calls.append(( self.remove_aggregate_host, ctxt, aggregate_id, host_param, host, slave_info)) class StubDependencies(object): """Stub dependencies for ResourcePool.""" def __init__(self): self.compute_rpcapi = MockComputeAPI() def _is_hv_pool(self, *_ignore): return True def _get_metadata(self, *_ignore): return { pool_states.KEY: {}, 'master_compute': 'master' } def _create_slave_info(self, *ignore): return "SLAVE_INFO" class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool): """A ResourcePool, use stub dependencies.""" class HypervisorPoolTestCase(test.NoDBTestCase): fake_aggregate = { 'id': 98, 'hosts': [], 'metadata': { 'master_compute': 'master', pool_states.POOL_FLAG: '', pool_states.KEY: '' } } fake_aggregate = objects.Aggregate(**fake_aggregate) def test_slave_asks_master_to_add_slave_to_pool(self): slave = ResourcePoolWithStubs() slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") self.assertIn( (slave.compute_rpcapi.add_aggregate_host, "CONTEXT", jsonutils.to_primitive(self.fake_aggregate), "slave", "master", "SLAVE_INFO"), slave.compute_rpcapi._mock_calls) def test_slave_asks_master_to_remove_slave_from_pool(self): slave = ResourcePoolWithStubs() slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") self.assertIn( (slave.compute_rpcapi.remove_aggregate_host, "CONTEXT", 98, "slave", "master", "SLAVE_INFO"), slave.compute_rpcapi._mock_calls) class SwapXapiHostTestCase(test.NoDBTestCase): def test_swapping(self): self.assertEqual( "http://otherserver:8765/somepath", pool.swap_xapi_host( "http://someserver:8765/somepath", 'otherserver')) def test_no_port(self): self.assertEqual( "http://otherserver/somepath", pool.swap_xapi_host( "http://someserver/somepath", 'otherserver')) def test_no_path(self): self.assertEqual( "http://otherserver", pool.swap_xapi_host( "http://someserver", 'otherserver')) class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB): """Unit tests for live_migration.""" def setUp(self): super(XenAPILiveMigrateTestCase, self).setUp() self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') self.flags(firewall_driver='nova.virt.xenapi.firewall.' 
'Dom0IptablesFirewallDriver', host='host') db_fakes.stub_out_db_instance_api(self) self.context = context.get_admin_context() def test_live_migration_calls_vmops(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) def fake_live_migrate(context, instance_ref, dest, post_method, recover_method, block_migration, migrate_data): fake_live_migrate.called = True self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate) self.conn.live_migration(None, None, None, None, None) self.assertTrue(fake_live_migrate.called) def test_pre_live_migration(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) with mock.patch.object(self.conn._vmops, "pre_live_migration") as pre: pre.return_value = True result = self.conn.pre_live_migration( "ctx", "inst", "bdi", "nw", "di", "data") self.assertTrue(result) pre.assert_called_with("ctx", "inst", "bdi", "nw", "di", "data") def test_post_live_migration_at_destination(self): # ensure method is present stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) fake_instance = {"name": "name"} fake_network_info = "network_info" def fake_fw(instance, network_info): self.assertEqual(instance, fake_instance) self.assertEqual(network_info, fake_network_info) fake_fw.call_count += 1 def fake_create_kernel_and_ramdisk(context, session, instance, name_label): return "fake-kernel-file", "fake-ramdisk-file" fake_fw.call_count = 0 _vmops = self.conn._vmops self.stubs.Set(_vmops.firewall_driver, 'setup_basic_filtering', fake_fw) self.stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_fw) self.stubs.Set(_vmops.firewall_driver, 'apply_instance_filter', fake_fw) self.stubs.Set(vm_utils, "create_kernel_and_ramdisk", fake_create_kernel_and_ramdisk) def fake_get_vm_opaque_ref(instance): fake_get_vm_opaque_ref.called = True self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref) fake_get_vm_opaque_ref.called = False def fake_strip_base_mirror_from_vdis(session, vm_ref): fake_strip_base_mirror_from_vdis.called = True self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis", fake_strip_base_mirror_from_vdis) fake_strip_base_mirror_from_vdis.called = False self.conn.post_live_migration_at_destination(None, fake_instance, fake_network_info, None) self.assertEqual(fake_fw.call_count, 3) self.assertTrue(fake_get_vm_opaque_ref.called) self.assertTrue(fake_strip_base_mirror_from_vdis.called) def test_check_can_live_migrate_destination_with_block_migration(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf") expected = {'block_migration': True, 'is_volume_backed': False, 'migrate_data': { 'migrate_send_data': {'value': 'fake_migrate_data'}, 'destination_sr_ref': 'asdf' } } result = self.conn.check_can_live_migrate_destination(self.context, {'host': 'host'}, {}, {}, True, False) result.is_volume_backed = False self.assertEqual(expected, result.to_legacy_dict()) def test_check_live_migrate_destination_verifies_ip(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) for pif_ref in xenapi_fake.get_all('PIF'): pif_rec = xenapi_fake.get_record('PIF', pif_ref) pif_rec['IP'] = '' pif_rec['IPv6'] = '' self.stubs.Set(vm_utils, 
"safe_find_sr", lambda _x: "asdf") self.assertRaises(exception.MigrationError, self.conn.check_can_live_migrate_destination, self.context, {'host': 'host'}, {}, {}, True, False) def test_check_can_live_migrate_destination_block_migration_fails(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForFailedMigrateTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, self.conn.check_can_live_migrate_destination, self.context, {'host': 'host'}, {}, {}, True, False) def _add_default_live_migrate_stubs(self, conn): def fake_generate_vdi_map(destination_sr_ref, _vm_ref): pass def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): return [] def fake_get_vm_opaque_ref(instance): return "fake_vm" def fake_lookup_kernel_ramdisk(session, vm): return ("fake_PV_kernel", "fake_PV_ramdisk") self.stubs.Set(conn._vmops, "_generate_vdi_map", fake_generate_vdi_map) self.stubs.Set(conn._vmops, "_get_iscsi_srs", fake_get_iscsi_srs) self.stubs.Set(conn._vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref) self.stubs.Set(vm_utils, "lookup_kernel_ramdisk", fake_lookup_kernel_ramdisk) def test_check_can_live_migrate_source_with_block_migrate(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self._add_default_live_migrate_stubs(self.conn) dest_check_data = {'block_migration': True, 'is_volume_backed': False, 'migrate_data': { 'destination_sr_ref': None, 'migrate_send_data': {'key': 'value'} }} result = self.conn.check_can_live_migrate_source(self.context, {'host': 'host'}, dest_check_data) self.assertEqual(dest_check_data, result.to_legacy_dict()) def test_check_can_live_migrate_source_with_block_migrate_iscsi(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self._add_default_live_migrate_stubs(self.conn) def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): return ['sr_ref'] self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", fake_get_iscsi_srs) def fake_make_plugin_call(plugin, method, **args): return "true" self.stubs.Set(self.conn._vmops, "_make_plugin_call", fake_make_plugin_call) dest_check_data = objects.XenapiLiveMigrateData( block_migration=True, is_volume_backed=True, destination_sr_ref=None, migrate_send_data={'key': 'value'}) result = self.conn.check_can_live_migrate_source(self.context, {'host': 'host'}, dest_check_data) self.assertEqual(dest_check_data.to_legacy_dict(), result.to_legacy_dict()) def test_check_can_live_migrate_source_with_block_iscsi_fails(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self._add_default_live_migrate_stubs(self.conn) def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): return ['sr_ref'] self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", fake_get_iscsi_srs) def fake_make_plugin_call(plugin, method, **args): return {'returncode': 'error', 'message': 'Plugin not found'} self.stubs.Set(self.conn._vmops, "_make_plugin_call", fake_make_plugin_call) self.assertRaises(exception.MigrationError, self.conn.check_can_live_migrate_source, self.context, {'host': 'host'}, {}) def test_check_can_live_migrate_source_with_block_migrate_fails(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForFailedMigrateTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self._add_default_live_migrate_stubs(self.conn) dest_check_data = {'block_migration': True, 
'is_volume_backed': True, 'migrate_data': { 'destination_sr_ref': None, 'migrate_send_data': {'key': 'value'} }} self.assertRaises(exception.MigrationError, self.conn.check_can_live_migrate_source, self.context, {'host': 'host'}, dest_check_data) def test_check_can_live_migrate_works(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) def fake_aggregate_get_by_host(context, host, key=None): self.assertEqual(CONF.host, host) return [dict(test_aggregate.fake_aggregate, metadetails={"host": "test_host_uuid"})] self.stub_out("nova.db.aggregate_get_by_host", fake_aggregate_get_by_host) self.conn.check_can_live_migrate_destination(self.context, {'host': 'host'}, False, False) def test_check_can_live_migrate_fails(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) def fake_aggregate_get_by_host(context, host, key=None): self.assertEqual(CONF.host, host) return [dict(test_aggregate.fake_aggregate, metadetails={"dest_other": "test_host_uuid"})] self.stub_out("nova.db.aggregate_get_by_host", fake_aggregate_get_by_host) self.assertRaises(exception.MigrationError, self.conn.check_can_live_migrate_destination, self.context, {'host': 'host'}, None, None) def test_live_migration(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) def fake_lookup_kernel_ramdisk(session, vm_ref): return "kernel", "ramdisk" self.stubs.Set(vm_utils, "lookup_kernel_ramdisk", fake_lookup_kernel_ramdisk) def fake_get_vm_opaque_ref(instance): return "fake_vm" self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref) def fake_get_host_opaque_ref(context, destination_hostname): return "fake_host" self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref", fake_get_host_opaque_ref) def post_method(context, instance, destination_hostname, block_migration, migrate_data): post_method.called = True migrate_data = objects.XenapiLiveMigrateData( destination_sr_ref="foo", migrate_send_data={"bar": "baz"}, block_migration=False) self.conn.live_migration(self.conn, None, None, post_method, None, None, migrate_data) self.assertTrue(post_method.called, "post_method.called") def test_live_migration_on_failure(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) def fake_get_vm_opaque_ref(instance): return "fake_vm" self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref) def fake_get_host_opaque_ref(context, destination_hostname): return "fake_host" self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref", fake_get_host_opaque_ref) def fake_call_xenapi(*args): raise NotImplementedError() self.stubs.Set(self.conn._vmops._session, "call_xenapi", fake_call_xenapi) def recover_method(context, instance, destination_hostname, block_migration): recover_method.called = True migrate_data = objects.XenapiLiveMigrateData( destination_sr_ref="foo", migrate_send_data={"bar": "baz"}, block_migration=False) self.assertRaises(NotImplementedError, self.conn.live_migration, self.conn, None, None, None, recover_method, None, migrate_data) self.assertTrue(recover_method.called, "recover_method.called") def test_live_migration_calls_post_migration(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) 
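# _add_default_live_migrate_stubs() (defined above) swaps _generate_vdi_map, _get_iscsi_srs, _get_vm_opaque_ref and vm_utils.lookup_kernel_ramdisk for no-op fakes so live_migration can run against the fake XenAPI session.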
self._add_default_live_migrate_stubs(self.conn) def post_method(context, instance, destination_hostname, block_migration, migrate_data): post_method.called = True # pass block_migration = True and migrate data migrate_data = objects.XenapiLiveMigrateData( destination_sr_ref="foo", migrate_send_data={"bar": "baz"}, block_migration=True) self.conn.live_migration(self.conn, None, None, post_method, None, True, migrate_data) self.assertTrue(post_method.called, "post_method.called") def test_live_migration_block_cleans_srs(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self._add_default_live_migrate_stubs(self.conn) def fake_get_iscsi_srs(context, instance): return ['sr_ref'] self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", fake_get_iscsi_srs) def fake_forget_sr(context, instance): fake_forget_sr.called = True self.stubs.Set(volume_utils, "forget_sr", fake_forget_sr) def post_method(context, instance, destination_hostname, block_migration, migrate_data): post_method.called = True migrate_data = objects.XenapiLiveMigrateData( destination_sr_ref="foo", migrate_send_data={"bar": "baz"}, block_migration=True) self.conn.live_migration(self.conn, None, None, post_method, None, True, migrate_data) self.assertTrue(post_method.called, "post_method.called") self.assertTrue(fake_forget_sr.called, "forget_sr.called") def test_live_migration_with_block_migration_fails_migrate_send(self): stubs.stubout_session(self.stubs, stubs.FakeSessionForFailedMigrateTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self._add_default_live_migrate_stubs(self.conn) def recover_method(context, instance, destination_hostname, block_migration): recover_method.called = True # pass block_migration = True and migrate data migrate_data = objects.XenapiLiveMigrateData( destination_sr_ref='foo', migrate_send_data={'bar': 'baz'}, block_migration=True) self.assertRaises(exception.MigrationError, self.conn.live_migration, self.conn, None, None, None, recover_method, True, migrate_data) self.assertTrue(recover_method.called, "recover_method.called") def test_live_migrate_block_migration_xapi_call_parameters(self): fake_vdi_map = object() class Session(xenapi_fake.SessionBase): def VM_migrate_send(self_, session, vmref, migrate_data, islive, vdi_map, vif_map, options): self.assertEqual({'SOMEDATA': 'SOMEVAL'}, migrate_data) self.assertEqual(fake_vdi_map, vdi_map) stubs.stubout_session(self.stubs, Session) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self._add_default_live_migrate_stubs(conn) def fake_generate_vdi_map(destination_sr_ref, _vm_ref): return fake_vdi_map self.stubs.Set(conn._vmops, "_generate_vdi_map", fake_generate_vdi_map) def dummy_callback(*args, **kwargs): pass migrate_data = objects.XenapiLiveMigrateData( migrate_send_data={'SOMEDATA': 'SOMEVAL'}, destination_sr_ref='TARGET_SR_OPAQUE_REF', block_migration=True) conn.live_migration( self.context, instance=dict(name='ignore'), dest=None, post_method=dummy_callback, recover_method=dummy_callback, block_migration="SOMEDATA", migrate_data=migrate_data) def test_live_migrate_pool_migration_xapi_call_parameters(self): class Session(xenapi_fake.SessionBase): def VM_pool_migrate(self_, session, vm_ref, host_ref, options): self.assertEqual("fake_ref", host_ref) self.assertEqual({"live": "true"}, options) raise IOError() stubs.stubout_session(self.stubs, Session) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) 
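# The fake Session above overrides VM.pool_migrate to verify the destination host ref and the {"live": "true"} options, then raises IOError, which the assertRaises call below expects to propagate out of live_migration.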
self._add_default_live_migrate_stubs(conn) def fake_get_host_opaque_ref(context, destination): return "fake_ref" self.stubs.Set(conn._vmops, "_get_host_opaque_ref", fake_get_host_opaque_ref) def dummy_callback(*args, **kwargs): pass migrate_data = objects.XenapiLiveMigrateData( migrate_send_data={'foo': 'bar'}, destination_sr_ref='foo', block_migration=False) self.assertRaises(IOError, conn.live_migration, self.context, instance=dict(name='ignore'), dest=None, post_method=dummy_callback, recover_method=dummy_callback, block_migration=False, migrate_data=migrate_data) def test_generate_vdi_map(self): stubs.stubout_session(self.stubs, xenapi_fake.SessionBase) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) vm_ref = "fake_vm_ref" def fake_find_sr(_session): self.assertEqual(conn._session, _session) return "source_sr_ref" self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr) def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref): self.assertEqual(conn._session, _session) self.assertEqual(vm_ref, _vm_ref) self.assertEqual("source_sr_ref", _sr_ref) return ["vdi0", "vdi1"] self.stubs.Set(vm_utils, "get_instance_vdis_for_sr", fake_get_instance_vdis_for_sr) result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref) self.assertEqual({"vdi0": "dest_sr_ref", "vdi1": "dest_sr_ref"}, result) def test_rollback_live_migration_at_destination(self): stubs.stubout_session(self.stubs, xenapi_fake.SessionBase) conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) with mock.patch.object(conn, "destroy") as mock_destroy: conn.rollback_live_migration_at_destination("context", "instance", [], {'block_device_mapping': []}) self.assertFalse(mock_destroy.called) class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB): def setUp(self): super(XenAPIInjectMetadataTestCase, self).setUp() self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') self.flags(firewall_driver='nova.virt.xenapi.firewall.' 
'Dom0IptablesFirewallDriver') stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) self.xenstore = dict(persist={}, ephem={}) self.called_fake_get_vm_opaque_ref = False def fake_get_vm_opaque_ref(inst, instance): self.called_fake_get_vm_opaque_ref = True if instance["uuid"] == "not_found": raise exception.NotFound self.assertEqual(instance, {'uuid': 'fake'}) return 'vm_ref' def fake_add_to_param_xenstore(inst, vm_ref, key, val): self.assertEqual(vm_ref, 'vm_ref') self.xenstore['persist'][key] = val def fake_remove_from_param_xenstore(inst, vm_ref, key): self.assertEqual(vm_ref, 'vm_ref') if key in self.xenstore['persist']: del self.xenstore['persist'][key] def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None): self.assertEqual(instance, {'uuid': 'fake'}) self.assertEqual(vm_ref, 'vm_ref') self.xenstore['ephem'][path] = jsonutils.dumps(value) def fake_delete_from_xenstore(inst, instance, path, vm_ref=None): self.assertEqual(instance, {'uuid': 'fake'}) self.assertEqual(vm_ref, 'vm_ref') if path in self.xenstore['ephem']: del self.xenstore['ephem'][path] self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref', fake_get_vm_opaque_ref) self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore', fake_add_to_param_xenstore) self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore', fake_remove_from_param_xenstore) self.stubs.Set(vmops.VMOps, '_write_to_xenstore', fake_write_to_xenstore) self.stubs.Set(vmops.VMOps, '_delete_from_xenstore', fake_delete_from_xenstore) def test_inject_instance_metadata(self): # Add some system_metadata to ensure it doesn't get added # to xenstore instance = dict(metadata=[{'key': 'a', 'value': 1}, {'key': 'b', 'value': 2}, {'key': 'c', 'value': 3}, # Check xenstore key sanitizing {'key': 'hi.there', 'value': 4}, {'key': 'hi!t.e/e', 'value': 5}], # Check xenstore key sanitizing system_metadata=[{'key': 'sys_a', 'value': 1}, {'key': 'sys_b', 'value': 2}, {'key': 'sys_c', 'value': 3}], uuid='fake') self.conn._vmops._inject_instance_metadata(instance, 'vm_ref') self.assertEqual(self.xenstore, { 'persist': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', 'vm-data/user-metadata/hi_there': '4', 'vm-data/user-metadata/hi_t_e_e': '5', }, 'ephem': {}, }) def test_change_instance_metadata_add(self): # Test XenStore key sanitizing here, too. 
diff = {'test.key': ['+', 4]} instance = {'uuid': 'fake'} self.xenstore = { 'persist': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', }, 'ephem': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', }, } self.conn._vmops.change_instance_metadata(instance, diff) self.assertEqual(self.xenstore, { 'persist': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', 'vm-data/user-metadata/test_key': '4', }, 'ephem': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', 'vm-data/user-metadata/test_key': '4', }, }) def test_change_instance_metadata_update(self): diff = dict(b=['+', 4]) instance = {'uuid': 'fake'} self.xenstore = { 'persist': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', }, 'ephem': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', }, } self.conn._vmops.change_instance_metadata(instance, diff) self.assertEqual(self.xenstore, { 'persist': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '4', 'vm-data/user-metadata/c': '3', }, 'ephem': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '4', 'vm-data/user-metadata/c': '3', }, }) def test_change_instance_metadata_delete(self): diff = dict(b=['-']) instance = {'uuid': 'fake'} self.xenstore = { 'persist': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', }, 'ephem': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/b': '2', 'vm-data/user-metadata/c': '3', }, } self.conn._vmops.change_instance_metadata(instance, diff) self.assertEqual(self.xenstore, { 'persist': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/c': '3', }, 'ephem': { 'vm-data/user-metadata/a': '1', 'vm-data/user-metadata/c': '3', }, }) def test_change_instance_metadata_not_found(self): instance = {'uuid': 'not_found'} self.conn._vmops.change_instance_metadata(instance, "fake_diff") self.assertTrue(self.called_fake_get_vm_opaque_ref) class XenAPISessionTestCase(test.NoDBTestCase): def _get_mock_xapisession(self, software_version): class MockXapiSession(xenapi_session.XenAPISession): def __init__(_ignore): "Skip the superclass's dirty init" def _get_software_version(_ignore): return software_version return MockXapiSession() def test_local_session(self): session = self._get_mock_xapisession({}) session.is_local_connection = True session.XenAPI = self.mox.CreateMockAnything() session.XenAPI.xapi_local().AndReturn("local_connection") self.mox.ReplayAll() self.assertEqual("local_connection", session._create_session("unix://local")) def test_remote_session(self): session = self._get_mock_xapisession({}) session.is_local_connection = False session.XenAPI = self.mox.CreateMockAnything() session.XenAPI.Session("url").AndReturn("remote_connection") self.mox.ReplayAll() self.assertEqual("remote_connection", session._create_session("url")) def test_get_product_version_product_brand_does_not_fail(self): session = self._get_mock_xapisession({ 'build_number': '0', 'date': '2012-08-03', 'hostname': 'komainu', 'linux': '3.2.0-27-generic', 'network_backend': 'bridge', 'platform_name': 'XCP_Kronos', 'platform_version': '1.6.0', 'xapi': '1.3', 'xen': '4.1.2', 'xencenter_max': '1.10', 'xencenter_min': '1.10' }) self.assertEqual( ((1, 6, 0), None), session._get_product_version_and_brand() ) def 
test_get_product_version_product_brand_xs_6(self): session = self._get_mock_xapisession({ 'product_brand': 'XenServer', 'product_version': '6.0.50', 'platform_version': '0.0.1' }) self.assertEqual( ((6, 0, 50), 'XenServer'), session._get_product_version_and_brand() ) def test_verify_plugin_version_same(self): session = self._get_mock_xapisession({}) session.PLUGIN_REQUIRED_VERSION = '2.4' self.mox.StubOutWithMock(session, 'call_plugin_serialized') session.call_plugin_serialized('nova_plugin_version', 'get_version', ).AndReturn("2.4") self.mox.ReplayAll() session._verify_plugin_version() def test_verify_plugin_version_compatible(self): session = self._get_mock_xapisession({}) session.XenAPI = xenapi_fake.FakeXenAPI() session.PLUGIN_REQUIRED_VERSION = '2.4' self.mox.StubOutWithMock(session, 'call_plugin_serialized') session.call_plugin_serialized('nova_plugin_version', 'get_version', ).AndReturn("2.5") self.mox.ReplayAll() session._verify_plugin_version() def test_verify_plugin_version_bad_maj(self): session = self._get_mock_xapisession({}) session.XenAPI = xenapi_fake.FakeXenAPI() session.PLUGIN_REQUIRED_VERSION = '2.4' self.mox.StubOutWithMock(session, 'call_plugin_serialized') session.call_plugin_serialized('nova_plugin_version', 'get_version', ).AndReturn("3.0") self.mox.ReplayAll() self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version) def test_verify_plugin_version_bad_min(self): session = self._get_mock_xapisession({}) session.XenAPI = xenapi_fake.FakeXenAPI() session.PLUGIN_REQUIRED_VERSION = '2.4' self.mox.StubOutWithMock(session, 'call_plugin_serialized') session.call_plugin_serialized('nova_plugin_version', 'get_version', ).AndReturn("2.3") self.mox.ReplayAll() self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version) def test_verify_current_version_matches(self): session = self._get_mock_xapisession({}) # Import the plugin to extract its version path = os.path.dirname(__file__) rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \ "plugins/nova_plugin_version" for elem in rel_path_elem.split('/'): path = os.path.join(path, elem) path = os.path.realpath(path) plugin_version = None with open(path) as plugin_file: for line in plugin_file: if "PLUGIN_VERSION = " in line: plugin_version = line.strip()[17:].strip('"') self.assertEqual(session.PLUGIN_REQUIRED_VERSION, plugin_version) class XenAPIFakeTestCase(test.NoDBTestCase): def test_query_matches(self): record = {'a': '1', 'b': '2', 'c_d': '3'} tests = {'field "a"="1"': True, 'field "b"="2"': True, 'field "b"="4"': False, 'not field "b"="4"': True, 'field "a"="1" and field "b"="4"': False, 'field "a"="1" or field "b"="4"': True, 'field "c__d"="3"': True, 'field \'b\'=\'2\'': True, } for query in tests.keys(): expected = tests[query] fail_msg = "for test '%s'" % query self.assertEqual(xenapi_fake._query_matches(record, query), expected, fail_msg) def test_query_bad_format(self): record = {'a': '1', 'b': '2', 'c': '3'} tests = ['"a"="1" or "b"="4"', 'a=1', ] for query in tests: fail_msg = "for test '%s'" % query self.assertFalse(xenapi_fake._query_matches(record, query), fail_msg) nova-13.1.4/nova/tests/unit/virt/xenapi/vm_rrd.xml0000664000567000056710000006544413064447141023274 0ustar jenkinsjenkins00000000000000 0003 5 1328795567 cpu0 DERIVE 300.0000 0.0 1.0000 5102.8417 0.0110 0 memory GAUGE 300.0000 0.0 Infinity 4294967296 10961792000.0000 0 memory_target GAUGE 300.0000 0.0 Infinity 4294967296 10961792000.0000 0 vif_0_tx DERIVE 300.0000 -Infinity Infinity 1079132206 752.4007 0 
[vm_rrd.xml, continued: RRD data-source definitions (vif_0_rx, vbd_xvda_write, vbd_xvda_read, memory_internal_free, vbd_xvdb_write, vbd_xvdb_read, vif_2_tx, vif_2_rx) and AVERAGE RRA sample rows at 1-, 12-, 720- and 17280-step resolutions; the fixture's XML markup was lost in extraction, so only this summary of the sample data is kept.]
nova-13.1.4/nova/tests/unit/virt/xenapi/test_vif.py0000664000567000056710000001643413064447152023453 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
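# The VIF driver tests below exercise XenVIFDriver, XenAPIBridgeDriver and XenAPIOpenVswitchDriver against a mocked session; fake_call_xenapi (defined after the imports) emulates the VM.get_VIFs, VIF.get_record, VIF.create, VIF.unplug and VIF.destroy calls the drivers make.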
import mock from nova import exception from nova.network import model from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import network_utils from nova.virt.xenapi import vif fake_vif = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': '123456789123', 'address': '00:00:00:00:00:00', 'network_id': 123, 'instance_uuid': 'fake-uuid', 'uuid': 'fake-uuid-2', } def fake_call_xenapi(method, *args): if method == "VM.get_VIFs": return ["fake_vif_ref", "fake_vif_ref_A2"] if method == "VIF.get_record": if args[0] == "fake_vif_ref": return {'uuid': fake_vif['uuid'], 'MAC': fake_vif['address'], 'network': 'fake_network', 'other_config': {'nicira-iface-id': fake_vif['id']} } else: raise exception.Exception("Failed get vif record") if method == "VIF.unplug": return if method == "VIF.destroy": if args[0] == "fake_vif_ref": return else: raise exception.Exception("unplug vif failed") if method == "VIF.create": if args[0] == "fake_vif_rec": return "fake_vif_ref" else: raise exception.Exception("VIF existed") return "Unexpected call_xenapi: %s.%s" % (method, args) class XenVIFDriverTestBase(stubs.XenAPITestBaseNoDB): def setUp(self): super(XenVIFDriverTestBase, self).setUp() self._session = mock.Mock() self._session.call_xenapi.side_effect = fake_call_xenapi class XenVIFDriverTestCase(XenVIFDriverTestBase): def setUp(self): super(XenVIFDriverTestCase, self).setUp() self.base_driver = vif.XenVIFDriver(self._session) def test_get_vif_ref(self): vm_ref = "fake_vm_ref" vif_ref = 'fake_vif_ref' ret_vif_ref = self.base_driver._get_vif_ref(fake_vif, vm_ref) self.assertEqual(vif_ref, ret_vif_ref) expected = [mock.call('VM.get_VIFs', vm_ref), mock.call('VIF.get_record', vif_ref)] self.assertEqual(expected, self._session.call_xenapi.call_args_list) def test_get_vif_ref_none_and_exception(self): vm_ref = "fake_vm_ref" vif = {'address': "no_match_vif_address"} ret_vif_ref = self.base_driver._get_vif_ref(vif, vm_ref) self.assertIsNone(ret_vif_ref) expected = [mock.call('VM.get_VIFs', vm_ref), mock.call('VIF.get_record', 'fake_vif_ref'), mock.call('VIF.get_record', 'fake_vif_ref_A2')] self.assertEqual(expected, self._session.call_xenapi.call_args_list) def test_create_vif(self): vif_rec = "fake_vif_rec" vm_ref = "fake_vm_ref" ret_vif_ref = self.base_driver._create_vif(fake_vif, vif_rec, vm_ref) self.assertEqual("fake_vif_ref", ret_vif_ref) expected = [mock.call('VIF.create', vif_rec)] self.assertEqual(expected, self._session.call_xenapi.call_args_list) def test_create_vif_exception(self): self.assertRaises(exception.NovaException, self.base_driver._create_vif, "fake_vif", "missing_vif_rec", "fake_vm_ref") @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value='fake_vif_ref') def test_unplug(self, mock_get_vif_ref): instance = {'name': "fake_instance"} vm_ref = "fake_vm_ref" self.base_driver.unplug(instance, fake_vif, vm_ref) expected = [mock.call('VIF.destroy', 'fake_vif_ref')] self.assertEqual(expected, self._session.call_xenapi.call_args_list) @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value='missing_vif_ref') def test_unplug_exception(self, mock_get_vif_ref): instance = "fake_instance" vm_ref = "fake_vm_ref" self.assertRaises(exception.NovaException, self.base_driver.unplug, instance, fake_vif, vm_ref) class XenAPIBridgeDriverTestCase(XenVIFDriverTestBase, object): def setUp(self): super(XenAPIBridgeDriverTestCase, self).setUp() self.bridge_driver = vif.XenAPIBridgeDriver(self._session) @mock.patch.object(vif.XenAPIBridgeDriver, 
'_ensure_vlan_bridge', return_value='fake_network_ref') @mock.patch.object(vif.XenVIFDriver, '_create_vif', return_value='fake_vif_ref') def test_plug_create_vlan(self, mock_create_vif, mock_ensure_vlan_bridge): instance = {'name': "fake_instance_name"} network = model.Network() network._set_meta({'should_create_vlan': True}) vif = model.VIF() vif._set_meta({'rxtx_cap': 1}) vif['network'] = network vif['address'] = "fake_address" vm_ref = "fake_vm_ref" device = 1 ret_vif_ref = self.bridge_driver.plug(instance, vif, vm_ref, device) self.assertEqual('fake_vif_ref', ret_vif_ref) @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value='fake_vif_ref') def test_unplug(self, mock_get_vif_ref): instance = {'name': "fake_instance"} vm_ref = "fake_vm_ref" self.bridge_driver.unplug(instance, fake_vif, vm_ref) expected = [mock.call('VIF.destroy', 'fake_vif_ref')] self.assertEqual(expected, self._session.call_xenapi.call_args_list) class XenAPIOpenVswitchDriverTestCase(XenVIFDriverTestBase): def setUp(self): super(XenAPIOpenVswitchDriverTestCase, self).setUp() self.ovs_driver = vif.XenAPIOpenVswitchDriver(self._session) @mock.patch.object(network_utils, 'find_network_with_bridge', return_value='fake_network_ref') @mock.patch.object(vif.XenVIFDriver, '_create_vif', return_value='fake_vif_ref') @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value=None) def test_plug(self, mock_get_vif_ref, mock_create_vif, mock_find_network_with_bridge): instance = {'name': "fake_instance_name"} vm_ref = "fake_vm_ref" device = 1 ret_vif_ref = self.ovs_driver.plug(instance, fake_vif, vm_ref, device) self.assertEqual('fake_vif_ref', ret_vif_ref) @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value='fake_vif_ref') def test_unplug(self, mock_get_vif_ref): instance = {'name': "fake_instance"} vm_ref = "fake_vm_ref" self.ovs_driver.unplug(instance, fake_vif, vm_ref) expected = [mock.call('VIF.destroy', 'fake_vif_ref')] self.assertEqual(expected, self._session.call_xenapi.call_args_list) nova-13.1.4/nova/tests/unit/virt/xenapi/plugins/0000775000567000056710000000000013064447471022733 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/plugins/__init__.py0000664000567000056710000000000013064447152025026 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/xenapi/plugins/plugin_test.py0000664000567000056710000000453613064447152025646 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import imp import mock import os import sys from nova import test from nova.virt.xenapi.client import session # both XenAPI and XenAPIPlugin may not exist # in unit test environment. 
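# Stubbing them into sys.modules lets load_plugin() below import the dom0 plugin sources without the real XenServer libraries being installed.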
sys.modules['XenAPI'] = mock.Mock() sys.modules['XenAPIPlugin'] = mock.Mock() class PluginTestBase(test.NoDBTestCase): def setUp(self): super(PluginTestBase, self).setUp() self.session = mock.Mock() session.apply_session_helpers(self.session) def mock_patch_object(self, target, attribute, return_val=None): # utilility function to mock object's attribute patcher = mock.patch.object(target, attribute, return_value=return_val) mock_one = patcher.start() self.addCleanup(patcher.stop) return mock_one def _get_plugin_path(self): current_path = os.path.realpath(__file__) rel_path = os.path.join(current_path, "../../../../../../../plugins/xenserver/xenapi/etc/xapi.d/plugins") plugin_path = os.path.abspath(rel_path) return plugin_path def load_plugin(self, file_name): # XAPI plugins run in a py24 environment and may be not compatible with # py34's syntax. In order to prevent unit test scanning the source file # under py34 environment, the plugins will be imported with this # function at run time. plugin_path = self._get_plugin_path() # add plugin path into search path. if plugin_path not in sys.path: sys.path.append(plugin_path) # be sure not to create c files next to the plugins sys.dont_write_bytecode = True name = file_name.split('.')[0] path = os.path.join(plugin_path, file_name) return imp.load_source(name, path) nova-13.1.4/nova/tests/unit/virt/xenapi/plugins/test_nova_plugin_version.py0000664000567000056710000000221113064447152030422 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.tests.unit.virt.xenapi.plugins import plugin_test class NovaPluginVersion(plugin_test.PluginTestBase): def setUp(self): super(NovaPluginVersion, self).setUp() self.nova_plugin_version = self.load_plugin("nova_plugin_version") def test_nova_plugin_version(self): session = 'fake_session' expected_value = self.nova_plugin_version.PLUGIN_VERSION return_value = self.nova_plugin_version.get_version(session) self.assertEqual(expected_value, return_value) nova-13.1.4/nova/tests/unit/virt/xenapi/test_volumeops.py0000664000567000056710000006043613064447152024721 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
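# The volumeops tests below mock out vm_utils and volume_utils and assert on the exact call sequences VolumeOps makes when attaching and detaching volumes.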
import mock from nova import exception from nova import test from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volume_utils from nova.virt.xenapi import volumeops class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB): def setUp(self): super(VolumeOpsTestBase, self).setUp() self._setup_mock_volumeops() def _setup_mock_volumeops(self): self.session = stubs.FakeSessionForVolumeTests('fake_uri') self.ops = volumeops.VolumeOps(self.session) class VolumeDetachTestCase(VolumeOpsTestBase): def test_detach_volume_call(self): registered_calls = [] def regcall(label): def side_effect(*args, **kwargs): registered_calls.append(label) return side_effect ops = volumeops.VolumeOps('session') self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup') self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number') self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown') self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd') self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd') self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number') self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd') self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr') volumeops.vm_utils.lookup('session', 'instance_1').AndReturn( 'vmref') volumeops.volume_utils.get_device_number('mountpoint').AndReturn( 'devnumber') volumeops.volume_utils.find_vbd_by_number( 'session', 'vmref', 'devnumber').AndReturn('vbdref') volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn( False) volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref') volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects( regcall('destroy_vbd')) volumeops.volume_utils.find_sr_from_vbd( 'session', 'vbdref').WithSideEffects( regcall('find_sr_from_vbd')).AndReturn('srref') volumeops.volume_utils.purge_sr('session', 'srref') self.mox.ReplayAll() ops.detach_volume( dict(driver_volume_type='iscsi', data='conn_data'), 'instance_1', 'mountpoint') self.assertEqual( ['find_sr_from_vbd', 'destroy_vbd'], registered_calls) @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") @mock.patch.object(volume_utils, "find_vbd_by_number") @mock.patch.object(vm_utils, "vm_ref_or_raise") def test_detach_volume(self, mock_vm, mock_vbd, mock_detach): mock_vm.return_value = "vm_ref" mock_vbd.return_value = "vbd_ref" self.ops.detach_volume({}, "name", "/dev/xvdd") mock_vm.assert_called_once_with(self.session, "name") mock_vbd.assert_called_once_with(self.session, "vm_ref", 3) mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"]) @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") @mock.patch.object(volume_utils, "find_vbd_by_number") @mock.patch.object(vm_utils, "vm_ref_or_raise") def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd, mock_detach): mock_vm.return_value = "vm_ref" mock_vbd.return_value = None self.ops.detach_volume({}, "name", "/dev/xvdd") self.assertFalse(mock_detach.called) @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") @mock.patch.object(volume_utils, "find_vbd_by_number") @mock.patch.object(vm_utils, "vm_ref_or_raise") def test_detach_volume_raises(self, mock_vm, mock_vbd, mock_detach): mock_vm.return_value = "vm_ref" mock_vbd.side_effect = test.TestingException self.assertRaises(test.TestingException, self.ops.detach_volume, {}, "name", "/dev/xvdd") self.assertFalse(mock_detach.called) @mock.patch.object(volume_utils, "purge_sr") @mock.patch.object(vm_utils, "destroy_vbd") 
@mock.patch.object(volume_utils, "find_sr_from_vbd") @mock.patch.object(vm_utils, "unplug_vbd") @mock.patch.object(vm_utils, "is_vm_shutdown") def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug, mock_find_sr, mock_destroy, mock_purge): mock_shutdown.return_value = False mock_find_sr.return_value = "sr_ref" self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"]) mock_shutdown.assert_called_once_with(self.session, "vm_ref") mock_find_sr.assert_called_once_with(self.session, "vbd_ref") mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref") mock_destroy.assert_called_once_with(self.session, "vbd_ref") mock_purge.assert_called_once_with(self.session, "sr_ref") @mock.patch.object(volume_utils, "purge_sr") @mock.patch.object(vm_utils, "destroy_vbd") @mock.patch.object(volume_utils, "find_sr_from_vbd") @mock.patch.object(vm_utils, "unplug_vbd") @mock.patch.object(vm_utils, "is_vm_shutdown") def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug, mock_find_sr, mock_destroy, mock_purge): mock_shutdown.return_value = True mock_find_sr.return_value = "sr_ref" self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"]) expected = [mock.call(self.session, "vbd_ref_1"), mock.call(self.session, "vbd_ref_2")] self.assertEqual(expected, mock_destroy.call_args_list) mock_purge.assert_called_with(self.session, "sr_ref") self.assertFalse(mock_unplug.called) @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") def test_detach_all_no_volumes(self, mock_get_all, mock_detach): mock_get_all.return_value = [] self.ops.detach_all("vm_ref") mock_get_all.assert_called_once_with("vm_ref") self.assertFalse(mock_detach.called) @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") def test_detach_all_volumes(self, mock_get_all, mock_detach): mock_get_all.return_value = ["1"] self.ops.detach_all("vm_ref") mock_get_all.assert_called_once_with("vm_ref") mock_detach.assert_called_once_with("vm_ref", ["1"]) def test_get_all_volume_vbd_refs_no_vbds(self): with mock.patch.object(self.session.VM, "get_VBDs") as mock_get: with mock.patch.object(self.session.VBD, "get_other_config") as mock_conf: mock_get.return_value = [] result = self.ops._get_all_volume_vbd_refs("vm_ref") self.assertEqual([], list(result)) mock_get.assert_called_once_with("vm_ref") self.assertFalse(mock_conf.called) def test_get_all_volume_vbd_refs_no_volumes(self): with mock.patch.object(self.session.VM, "get_VBDs") as mock_get: with mock.patch.object(self.session.VBD, "get_other_config") as mock_conf: mock_get.return_value = ["1"] mock_conf.return_value = {} result = self.ops._get_all_volume_vbd_refs("vm_ref") self.assertEqual([], list(result)) mock_get.assert_called_once_with("vm_ref") mock_conf.assert_called_once_with("1") def test_get_all_volume_vbd_refs_with_volumes(self): with mock.patch.object(self.session.VM, "get_VBDs") as mock_get: with mock.patch.object(self.session.VBD, "get_other_config") as mock_conf: mock_get.return_value = ["1", "2"] mock_conf.return_value = {"osvol": True} result = self.ops._get_all_volume_vbd_refs("vm_ref") self.assertEqual(["1", "2"], list(result)) mock_get.assert_called_once_with("vm_ref") class AttachVolumeTestCase(VolumeOpsTestBase): @mock.patch.object(volumeops.VolumeOps, "_attach_volume") @mock.patch.object(vm_utils, "vm_ref_or_raise") def test_attach_volume_default_hotplug(self, mock_get_vm, 
mock_attach): mock_get_vm.return_value = "vm_ref" self.ops.attach_volume({}, "instance_name", "/dev/xvda") mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0, True) @mock.patch.object(volumeops.VolumeOps, "_attach_volume") @mock.patch.object(vm_utils, "vm_ref_or_raise") def test_attach_volume_hotplug(self, mock_get_vm, mock_attach): mock_get_vm.return_value = "vm_ref" self.ops.attach_volume({}, "instance_name", "/dev/xvda", False) mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0, False) @mock.patch.object(volumeops.VolumeOps, "_attach_volume") def test_attach_volume_default_hotplug_connect_volume(self, mock_attach): self.ops.connect_volume({}) mock_attach.assert_called_once_with({}) @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type") @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider") @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume") @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm") def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor, mock_provider, mock_driver): connection_info = {"data": {}} with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi: mock_provider.return_value = ("sr_ref", "sr_uuid") mock_vdi.return_value = "vdi_uuid" result = self.ops._attach_volume(connection_info) self.assertEqual(result, ("sr_uuid", "vdi_uuid")) mock_driver.assert_called_once_with(connection_info) mock_provider.assert_called_once_with({}, None) mock_hypervisor.assert_called_once_with("sr_ref", {}) self.assertFalse(mock_attach.called) @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type") @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider") @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume") @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm") def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor, mock_provider, mock_driver): connection_info = {"data": {}} with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi: mock_provider.return_value = ("sr_ref", "sr_uuid") mock_hypervisor.return_value = "vdi_ref" mock_vdi.return_value = "vdi_uuid" result = self.ops._attach_volume(connection_info, "vm_ref", "name", 2, True) self.assertEqual(result, ("sr_uuid", "vdi_uuid")) mock_driver.assert_called_once_with(connection_info) mock_provider.assert_called_once_with({}, "name") mock_hypervisor.assert_called_once_with("sr_ref", {}) mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2, True) @mock.patch.object(volume_utils, "forget_sr") @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type") @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider") @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume") @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm") def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor, mock_provider, mock_driver, mock_forget): connection_info = {"data": {}} mock_provider.return_value = ("sr_ref", "sr_uuid") mock_hypervisor.side_effect = test.TestingException self.assertRaises(test.TestingException, self.ops._attach_volume, connection_info) mock_driver.assert_called_once_with(connection_info) mock_provider.assert_called_once_with({}, None) mock_hypervisor.assert_called_once_with("sr_ref", {}) mock_forget.assert_called_once_with(self.session, "sr_ref") self.assertFalse(mock_attach.called) def test_check_is_supported_driver_type_pass_iscsi(self): conn_info = 
{"driver_volume_type": "iscsi"} self.ops._check_is_supported_driver_type(conn_info) def test_check_is_supported_driver_type_pass_xensm(self): conn_info = {"driver_volume_type": "xensm"} self.ops._check_is_supported_driver_type(conn_info) def test_check_is_supported_driver_type_pass_bad(self): conn_info = {"driver_volume_type": "bad"} self.assertRaises(exception.VolumeDriverNotFound, self.ops._check_is_supported_driver_type, conn_info) @mock.patch.object(volume_utils, "introduce_sr") @mock.patch.object(volume_utils, "find_sr_by_uuid") @mock.patch.object(volume_utils, "parse_sr_info") def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr, mock_introduce_sr): mock_parse.return_value = ("uuid", "label", "params") mock_find_sr.return_value = None mock_introduce_sr.return_value = "sr_ref" ref, uuid = self.ops._connect_to_volume_provider({}, "name") self.assertEqual("sr_ref", ref) self.assertEqual("uuid", uuid) mock_parse.assert_called_once_with({}, "Disk-for:name") mock_find_sr.assert_called_once_with(self.session, "uuid") mock_introduce_sr.assert_called_once_with(self.session, "uuid", "label", "params") @mock.patch.object(volume_utils, "introduce_sr") @mock.patch.object(volume_utils, "find_sr_by_uuid") @mock.patch.object(volume_utils, "parse_sr_info") def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr, mock_introduce_sr): mock_parse.return_value = ("uuid", "label", "params") mock_find_sr.return_value = "sr_ref" ref, uuid = self.ops._connect_to_volume_provider({}, "name") self.assertEqual("sr_ref", ref) self.assertEqual("uuid", uuid) mock_parse.assert_called_once_with({}, "Disk-for:name") mock_find_sr.assert_called_once_with(self.session, "uuid") self.assertFalse(mock_introduce_sr.called) @mock.patch.object(volume_utils, "introduce_vdi") def test_connect_hypervisor_to_volume_regular(self, mock_intro): mock_intro.return_value = "vdi" result = self.ops._connect_hypervisor_to_volume("sr", {}) self.assertEqual("vdi", result) mock_intro.assert_called_once_with(self.session, "sr") @mock.patch.object(volume_utils, "introduce_vdi") def test_connect_hypervisor_to_volume_vdi(self, mock_intro): mock_intro.return_value = "vdi" conn = {"vdi_uuid": "id"} result = self.ops._connect_hypervisor_to_volume("sr", conn) self.assertEqual("vdi", result) mock_intro.assert_called_once_with(self.session, "sr", vdi_uuid="id") @mock.patch.object(volume_utils, "introduce_vdi") def test_connect_hypervisor_to_volume_lun(self, mock_intro): mock_intro.return_value = "vdi" conn = {"target_lun": "lun"} result = self.ops._connect_hypervisor_to_volume("sr", conn) self.assertEqual("vdi", result) mock_intro.assert_called_once_with(self.session, "sr", target_lun="lun") @mock.patch.object(volume_utils, "introduce_vdi") @mock.patch.object(volumeops.LOG, 'debug') def test_connect_hypervisor_to_volume_mask_password(self, mock_debug, mock_intro): # Tests that the connection_data is scrubbed before logging. 
data = {'auth_password': 'verybadpass'} self.ops._connect_hypervisor_to_volume("sr", data) self.assertTrue(mock_debug.called, 'LOG.debug was not called') password_logged = False for call in mock_debug.call_args_list: # The call object is a tuple of (args, kwargs) if 'verybadpass' in call[0]: password_logged = True break self.assertFalse(password_logged, 'connection_data was not scrubbed') @mock.patch.object(vm_utils, "is_vm_shutdown") @mock.patch.object(vm_utils, "create_vbd") def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown): mock_vbd.return_value = "vbd" mock_shutdown.return_value = False with mock.patch.object(self.session.VBD, "plug") as mock_plug: self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True) mock_plug.assert_called_once_with("vbd", "vm") mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2, bootable=False, osvol=True) mock_shutdown.assert_called_once_with(self.session, "vm") @mock.patch.object(vm_utils, "is_vm_shutdown") @mock.patch.object(vm_utils, "create_vbd") def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown): mock_vbd.return_value = "vbd" mock_shutdown.return_value = True with mock.patch.object(self.session.VBD, "plug") as mock_plug: self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True) self.assertFalse(mock_plug.called) mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2, bootable=False, osvol=True) mock_shutdown.assert_called_once_with(self.session, "vm") @mock.patch.object(vm_utils, "is_vm_shutdown") @mock.patch.object(vm_utils, "create_vbd") def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown): mock_vbd.return_value = "vbd" with mock.patch.object(self.session.VBD, "plug") as mock_plug: self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False) self.assertFalse(mock_plug.called) mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2, bootable=False, osvol=True) self.assertFalse(mock_shutdown.called) class FindBadVolumeTestCase(VolumeOpsTestBase): @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") def test_find_bad_volumes_no_vbds(self, mock_get_all): mock_get_all.return_value = [] result = self.ops.find_bad_volumes("vm_ref") mock_get_all.assert_called_once_with("vm_ref") self.assertEqual([], result) @mock.patch.object(volume_utils, "find_sr_from_vbd") @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr): mock_get_all.return_value = ["1", "2"] mock_find_sr.return_value = "sr_ref" with mock.patch.object(self.session.SR, "scan") as mock_scan: result = self.ops.find_bad_volumes("vm_ref") mock_get_all.assert_called_once_with("vm_ref") expected_find = [mock.call(self.session, "1"), mock.call(self.session, "2")] self.assertEqual(expected_find, mock_find_sr.call_args_list) expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")] self.assertEqual(expected_scan, mock_scan.call_args_list) self.assertEqual([], result) @mock.patch.object(volume_utils, "find_sr_from_vbd") @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr): mock_get_all.return_value = ["vbd_ref"] mock_find_sr.return_value = "sr_ref" class FakeException(Exception): details = ['SR_BACKEND_FAILURE_40', "", "", ""] session = mock.Mock() session.XenAPI.Failure = FakeException self.ops._session = session with mock.patch.object(session.SR, "scan") as mock_scan: with mock.patch.object(session.VBD, "get_device") as mock_get: mock_scan.side_effect = 
FakeException mock_get.return_value = "xvdb" result = self.ops.find_bad_volumes("vm_ref") mock_get_all.assert_called_once_with("vm_ref") mock_scan.assert_called_once_with("sr_ref") mock_get.assert_called_once_with("vbd_ref") self.assertEqual(["/dev/xvdb"], result) @mock.patch.object(volume_utils, "find_sr_from_vbd") @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr): mock_get_all.return_value = ["vbd_ref"] mock_find_sr.return_value = "sr_ref" class FakeException(Exception): details = ['foo', "", "", ""] session = mock.Mock() session.XenAPI.Failure = FakeException self.ops._session = session with mock.patch.object(session.SR, "scan") as mock_scan: with mock.patch.object(session.VBD, "get_device") as mock_get: mock_scan.side_effect = FakeException mock_get.return_value = "xvdb" self.assertRaises(FakeException, self.ops.find_bad_volumes, "vm_ref") mock_scan.assert_called_once_with("sr_ref") class CleanupFromVDIsTestCase(VolumeOpsTestBase): def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs, sr_refs): find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref in vdi_refs] find_sr_from_vdi.assert_has_calls(find_sr_calls) purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref in sr_refs] purge_sr.assert_has_calls(purge_sr_calls) @mock.patch.object(volume_utils, 'find_sr_from_vdi') @mock.patch.object(volume_utils, 'purge_sr') def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi): vdi_refs = ['vdi_ref1', 'vdi_ref2'] sr_refs = ['sr_ref1', 'sr_ref2'] find_sr_from_vdi.side_effect = sr_refs self.ops.safe_cleanup_from_vdis(vdi_refs) self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, sr_refs) @mock.patch.object(volume_utils, 'find_sr_from_vdi', side_effect=[exception.StorageError(reason=''), 'sr_ref2']) @mock.patch.object(volume_utils, 'purge_sr') def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr, find_sr_from_vdi): vdi_refs = ['vdi_ref1', 'vdi_ref2'] sr_refs = ['sr_ref2'] find_sr_from_vdi.side_effect = [exception.StorageError(reason=''), sr_refs[0]] self.ops.safe_cleanup_from_vdis(vdi_refs) self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, sr_refs) @mock.patch.object(volume_utils, 'find_sr_from_vdi') @mock.patch.object(volume_utils, 'purge_sr') def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr, find_sr_from_vdi): vdi_refs = ['vdi_ref1', 'vdi_ref2'] sr_refs = ['sr_ref1', 'sr_ref2'] find_sr_from_vdi.side_effect = sr_refs purge_sr.side_effect = [test.TestingException, None] self.ops.safe_cleanup_from_vdis(vdi_refs) self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, sr_refs) nova-13.1.4/nova/tests/unit/virt/disk/0000775000567000056710000000000013064447471020720 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/disk/test_api.py0000664000567000056710000002153613064447152023105 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import tempfile import fixtures import mock from oslo_concurrency import processutils from nova import test from nova import utils from nova.virt.disk import api from nova.virt.disk.mount import api as mount from nova.virt.image import model as imgmodel class FakeMount(object): device = None @staticmethod def instance_for_format(image, mountdir, partition): return FakeMount() def get_dev(self): pass def unget_dev(self): pass class APITestCase(test.NoDBTestCase): def test_can_resize_need_fs_type_specified(self): # NOTE(mikal): Bug 1094373 saw a regression where we failed to # treat a failure to mount as a failure to be able to resize the # filesystem def _fake_get_disk_size(path): return 10 self.useFixture(fixtures.MonkeyPatch( 'nova.virt.disk.api.get_disk_size', _fake_get_disk_size)) def fake_trycmd(*args, **kwargs): return '', 'broken' self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd)) def fake_returns_true(*args, **kwargs): return True def fake_returns_nothing(*args, **kwargs): return '' self.useFixture(fixtures.MonkeyPatch( 'nova.virt.disk.mount.nbd.NbdMount.get_dev', fake_returns_true)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.disk.mount.nbd.NbdMount.map_dev', fake_returns_true)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.disk.vfs.localfs.VFSLocalFS.get_image_fs', fake_returns_nothing)) # Force the use of localfs, which is what was used during the failure # reported in the bug def fake_import_fails(*args, **kwargs): raise Exception('Failed') self.useFixture(fixtures.MonkeyPatch( 'oslo_utils.import_module', fake_import_fails)) imgfile = tempfile.NamedTemporaryFile() self.addCleanup(imgfile.close) image = imgmodel.LocalFileImage(imgfile.name, imgmodel.FORMAT_QCOW2) self.assertFalse(api.is_image_extendable(image)) def test_is_image_extendable_raw(self): imgfile = tempfile.NamedTemporaryFile() self.mox.StubOutWithMock(utils, 'execute') utils.execute('e2label', imgfile) self.mox.ReplayAll() image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW) self.addCleanup(imgfile.close) self.assertTrue(api.is_image_extendable(image)) def test_resize2fs_success(self): imgfile = tempfile.NamedTemporaryFile() self.addCleanup(imgfile.close) self.mox.StubOutWithMock(utils, 'execute') utils.execute('e2fsck', '-fp', imgfile, check_exit_code=[0, 1, 2], run_as_root=False) utils.execute('resize2fs', imgfile, check_exit_code=False, run_as_root=False) self.mox.ReplayAll() api.resize2fs(imgfile) def test_resize2fs_e2fsck_fails(self): imgfile = tempfile.NamedTemporaryFile() self.addCleanup(imgfile.close) self.mox.StubOutWithMock(utils, 'execute') utils.execute('e2fsck', '-fp', imgfile, check_exit_code=[0, 1, 2], run_as_root=False).AndRaise( processutils.ProcessExecutionError("fs error")) self.mox.ReplayAll() api.resize2fs(imgfile) def test_extend_qcow_success(self): imgfile = tempfile.NamedTemporaryFile() self.addCleanup(imgfile.close) imgsize = 10 device = "/dev/sdh" image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2) self.flags(resize_fs_using_block_device=True) mounter = FakeMount.instance_for_format( image, None, None) mounter.device = device self.mox.StubOutWithMock(api, 'can_resize_image') self.mox.StubOutWithMock(utils, 'execute') self.mox.StubOutWithMock(api, 'is_image_extendable') self.mox.StubOutWithMock(mounter, 'get_dev') self.mox.StubOutWithMock(mounter, 'unget_dev') self.mox.StubOutWithMock(api, 'resize2fs') self.mox.StubOutWithMock(mount.Mount, 
'instance_for_format', use_mock_anything=True) api.can_resize_image(imgfile, imgsize).AndReturn(True) utils.execute('qemu-img', 'resize', imgfile, imgsize) api.is_image_extendable(image).AndReturn(True) mount.Mount.instance_for_format(image, None, None).AndReturn(mounter) mounter.get_dev().AndReturn(True) api.resize2fs(mounter.device, run_as_root=True, check_exit_code=[0]) mounter.unget_dev() self.mox.ReplayAll() api.extend(image, imgsize) @mock.patch.object(api, 'can_resize_image', return_value=True) @mock.patch.object(api, 'is_image_extendable') @mock.patch.object(utils, 'execute') def test_extend_qcow_no_resize(self, mock_execute, mock_extendable, mock_can_resize_image): imgfile = tempfile.NamedTemporaryFile() self.addCleanup(imgfile.close) imgsize = 10 image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2) self.flags(resize_fs_using_block_device=False) api.extend(image, imgsize) mock_can_resize_image.assert_called_once_with(imgfile, imgsize) mock_execute.assert_called_once_with('qemu-img', 'resize', imgfile, imgsize) self.assertFalse(mock_extendable.called) def test_extend_raw_success(self): imgfile = tempfile.NamedTemporaryFile() self.addCleanup(imgfile.close) imgsize = 10 image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW) self.mox.StubOutWithMock(api, 'can_resize_image') self.mox.StubOutWithMock(utils, 'execute') self.mox.StubOutWithMock(api, 'resize2fs') api.can_resize_image(imgfile, imgsize).AndReturn(True) utils.execute('qemu-img', 'resize', imgfile, imgsize) utils.execute('e2label', image.path) api.resize2fs(imgfile, run_as_root=False, check_exit_code=[0]) self.mox.ReplayAll() api.extend(image, imgsize) HASH_VFAT = utils.get_hash_str(api.FS_FORMAT_VFAT)[:7] HASH_EXT4 = utils.get_hash_str(api.FS_FORMAT_EXT4)[:7] HASH_NTFS = utils.get_hash_str(api.FS_FORMAT_NTFS)[:7] def test_get_file_extension_for_os_type(self): self.assertEqual(self.HASH_VFAT, api.get_file_extension_for_os_type(None, None)) self.assertEqual(self.HASH_EXT4, api.get_file_extension_for_os_type('linux', None)) self.assertEqual(self.HASH_NTFS, api.get_file_extension_for_os_type( 'windows', None)) def test_get_file_extension_for_os_type_with_overrides(self): with mock.patch('nova.virt.disk.api._DEFAULT_MKFS_COMMAND', 'custom mkfs command'): self.assertEqual("a74d253", api.get_file_extension_for_os_type( 'linux', None)) self.assertEqual("a74d253", api.get_file_extension_for_os_type( 'windows', None)) self.assertEqual("a74d253", api.get_file_extension_for_os_type('osx', None)) with mock.patch.dict(api._MKFS_COMMAND, {'osx': 'custom mkfs command'}, clear=True): self.assertEqual(self.HASH_VFAT, api.get_file_extension_for_os_type(None, None)) self.assertEqual(self.HASH_EXT4, api.get_file_extension_for_os_type('linux', None)) self.assertEqual(self.HASH_NTFS, api.get_file_extension_for_os_type( 'windows', None)) self.assertEqual("a74d253", api.get_file_extension_for_os_type( 'osx', None)) nova-13.1.4/nova/tests/unit/virt/disk/__init__.py0000664000567000056710000000000013064447140023010 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/disk/mount/0000775000567000056710000000000013064447471022062 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/disk/mount/test_loop.py0000664000567000056710000000706313064447140024443 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from nova import test from nova.virt.disk.mount import loop from nova.virt.image import model as imgmodel def _fake_noop(*args, **kwargs): return def _fake_trycmd_losetup_works(*args, **kwargs): return '/dev/loop0', '' def _fake_trycmd_losetup_fails(*args, **kwards): return '', 'doh' class LoopTestCase(test.NoDBTestCase): def setUp(self): super(LoopTestCase, self).setUp() self.file = imgmodel.LocalFileImage("/some/file.qcow2", imgmodel.FORMAT_QCOW2) def test_get_dev(self): tempdir = self.useFixture(fixtures.TempDir()).path l = loop.LoopMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', _fake_trycmd_losetup_works)) self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop)) # No error logged, device consumed self.assertTrue(l.get_dev()) self.assertTrue(l.linked) self.assertEqual('', l.error) self.assertEqual('/dev/loop0', l.device) # Free l.unget_dev() self.assertFalse(l.linked) self.assertEqual('', l.error) self.assertIsNone(l.device) def test_inner_get_dev_fails(self): tempdir = self.useFixture(fixtures.TempDir()).path l = loop.LoopMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', _fake_trycmd_losetup_fails)) # No error logged, device consumed self.assertFalse(l._inner_get_dev()) self.assertFalse(l.linked) self.assertNotEqual('', l.error) self.assertIsNone(l.device) # Free l.unget_dev() self.assertFalse(l.linked) self.assertIsNone(l.device) def test_get_dev_timeout(self): tempdir = self.useFixture(fixtures.TempDir()).path l = loop.LoopMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop)) self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', _fake_trycmd_losetup_fails)) self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.' 'MAX_DEVICE_WAIT'), -10)) # Always fail to get a device def fake_get_dev_fails(): return False l._inner_get_dev = fake_get_dev_fails # Fail to get a device self.assertFalse(l.get_dev()) def test_unget_dev(self): tempdir = self.useFixture(fixtures.TempDir()).path l = loop.LoopMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop)) # This just checks that a free of something we don't have doesn't # throw an exception l.unget_dev() nova-13.1.4/nova/tests/unit/virt/disk/mount/test_api.py0000664000567000056710000001674713064447152024257 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from nova import test from nova.virt.disk.mount import api from nova.virt.disk.mount import block from nova.virt.disk.mount import loop from nova.virt.disk.mount import nbd from nova.virt.image import model as imgmodel PARTITION = 77 ORIG_DEVICE = "/dev/null" AUTOMAP_PARTITION = "/dev/nullp77" MAP_PARTITION = "/dev/mapper/nullp77" class MountTestCase(test.NoDBTestCase): def setUp(self): super(MountTestCase, self).setUp() def _test_map_dev(self, partition): mount = api.Mount(mock.sentinel.image, mock.sentinel.mount_dir) mount.device = ORIG_DEVICE mount.partition = partition mount.map_dev() return mount @mock.patch('nova.utils.trycmd') def _test_map_dev_with_trycmd(self, partition, trycmd): trycmd.return_value = [None, None] mount = self._test_map_dev(partition) self.assertEqual(1, trycmd.call_count) # don't care about args return mount def _exists_effect(self, data): def exists_effect(filename): try: v = data[filename] if isinstance(v, list): if len(v) > 0: return v.pop(0) self.fail("Out of items for: %s" % filename) return v except KeyError: self.fail("Unexpected call with: %s" % filename) return exists_effect def _check_calls(self, exists, filenames): self.assertEqual([mock.call(x) for x in filenames], exists.call_args_list) @mock.patch('os.path.exists') def test_map_dev_partition_search(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True}) mount = self._test_map_dev(-1) self._check_calls(exists, [ORIG_DEVICE]) self.assertNotEqual("", mount.error) self.assertFalse(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_good(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: False, MAP_PARTITION: [False, True]}) mount = self._test_map_dev_with_trycmd(PARTITION) self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION]) self.assertEqual("", mount.error) self.assertTrue(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_error(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: False, MAP_PARTITION: False}) mount = self._test_map_dev_with_trycmd(PARTITION) self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION, MAP_PARTITION, MAP_PARTITION]) self.assertNotEqual("", mount.error) self.assertFalse(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_automap(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: True}) mount = self._test_map_dev(PARTITION) self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION, AUTOMAP_PARTITION]) self.assertEqual(AUTOMAP_PARTITION, mount.mapped_device) self.assertTrue(mount.automapped) self.assertTrue(mount.mapped) @mock.patch('os.path.exists') def test_map_dev_else(self, exists): exists.side_effect = self._exists_effect({ ORIG_DEVICE: True, AUTOMAP_PARTITION: True}) mount = self._test_map_dev(None) self._check_calls(exists, [ORIG_DEVICE]) self.assertEqual(ORIG_DEVICE, mount.mapped_device) self.assertFalse(mount.automapped) self.assertTrue(mount.mapped) def test_instance_for_format_raw(self): image = imgmodel.LocalFileImage("/some/file.raw", imgmodel.FORMAT_RAW) mount_dir = '/mount/dir' partition = -1 inst = api.Mount.instance_for_format(image, mount_dir, partition) self.assertIsInstance(inst, loop.LoopMount) def test_instance_for_format_qcow2(self): image = imgmodel.LocalFileImage("/some/file.qcows", imgmodel.FORMAT_QCOW2) mount_dir = '/mount/dir' partition = -1 inst = api.Mount.instance_for_format(image, mount_dir, partition) 
self.assertIsInstance(inst, nbd.NbdMount) def test_instance_for_format_block(self): image = imgmodel.LocalBlockImage( "/dev/mapper/instances--instance-0000001_disk",) mount_dir = '/mount/dir' partition = -1 inst = api.Mount.instance_for_format(image, mount_dir, partition) self.assertIsInstance(inst, block.BlockMount) def test_instance_for_device_loop(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = -1 device = '/dev/loop0' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, loop.LoopMount) def test_instance_for_device_loop_partition(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = 1 device = '/dev/mapper/loop0p1' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, loop.LoopMount) def test_instance_for_device_nbd(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = -1 device = '/dev/nbd0' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, nbd.NbdMount) def test_instance_for_device_nbd_partition(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = 1 device = '/dev/mapper/nbd0p1' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, nbd.NbdMount) def test_instance_for_device_block(self): image = mock.MagicMock() mount_dir = '/mount/dir' partition = -1 device = '/dev/mapper/instances--instance-0000001_disk' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, block.BlockMount) def test_instance_for_device_block_partiton(self,): image = mock.MagicMock() mount_dir = '/mount/dir' partition = 1 device = '/dev/mapper/instances--instance-0000001_diskp1' inst = api.Mount.instance_for_device(image, mount_dir, partition, device) self.assertIsInstance(inst, block.BlockMount) nova-13.1.4/nova/tests/unit/virt/disk/mount/__init__.py0000664000567000056710000000000013064447140024152 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/disk/mount/test_nbd.py0000664000567000056710000003123613064447152024237 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import tempfile import time import eventlet import fixtures from nova import test from nova.virt.disk.mount import nbd from nova.virt.image import model as imgmodel ORIG_EXISTS = os.path.exists ORIG_LISTDIR = os.listdir def _fake_exists_no_users(path): if path.startswith('/sys/block/nbd'): if path.endswith('pid'): return False return True return ORIG_EXISTS(path) def _fake_listdir_nbd_devices(path): if path.startswith('/sys/block'): return ['nbd0', 'nbd1'] return ORIG_LISTDIR(path) def _fake_exists_all_used(path): if path.startswith('/sys/block/nbd'): return True return ORIG_EXISTS(path) def _fake_detect_nbd_devices_none(self): return [] def _fake_detect_nbd_devices(self): return ['nbd0', 'nbd1'] def _fake_noop(*args, **kwargs): return class NbdTestCase(test.NoDBTestCase): def setUp(self): super(NbdTestCase, self).setUp() self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices', _fake_detect_nbd_devices) self.useFixture(fixtures.MonkeyPatch('os.listdir', _fake_listdir_nbd_devices)) self.file = imgmodel.LocalFileImage("/some/file.qcow2", imgmodel.FORMAT_QCOW2) def test_nbd_no_devices(self): tempdir = self.useFixture(fixtures.TempDir()).path self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices', _fake_detect_nbd_devices_none) n = nbd.NbdMount(self.file, tempdir) self.assertIsNone(n._allocate_nbd()) def test_nbd_no_free_devices(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('os.path.exists', _fake_exists_all_used)) self.assertIsNone(n._allocate_nbd()) def test_nbd_not_loaded(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) # Fake out os.path.exists def fake_exists(path): if path.startswith('/sys/block/nbd'): return False return ORIG_EXISTS(path) self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists)) # This should fail, as we don't have the module "loaded" # TODO(mikal): work out how to force english as the gettext language # so that the error check always passes self.assertIsNone(n._allocate_nbd()) self.assertEqual('nbd unavailable: module not loaded', n.error) def test_nbd_allocation(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('os.path.exists', _fake_exists_no_users)) self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop)) # Allocate a nbd device self.assertEqual('/dev/nbd0', n._allocate_nbd()) def test_nbd_allocation_one_in_use(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop)) # Fake out os.path.exists def fake_exists(path): if path.startswith('/sys/block/nbd'): if path == '/sys/block/nbd0/pid': return True if path.endswith('pid'): return False return True return ORIG_EXISTS(path) self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists)) # Allocate a nbd device, should not be the in use one # TODO(mikal): Note that there is a leak here, as the in use nbd device # is removed from the list, but not returned so it will never be # re-added. I will fix this in a later patch. 
self.assertEqual('/dev/nbd1', n._allocate_nbd()) def test_inner_get_dev_no_devices(self): tempdir = self.useFixture(fixtures.TempDir()).path self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices', _fake_detect_nbd_devices_none) n = nbd.NbdMount(self.file, tempdir) self.assertFalse(n._inner_get_dev()) def test_inner_get_dev_qemu_fails(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('os.path.exists', _fake_exists_no_users)) # We have a trycmd that always fails def fake_trycmd(*args, **kwargs): return '', 'broken' self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd)) # Error logged, no device consumed self.assertFalse(n._inner_get_dev()) self.assertTrue(n.error.startswith('qemu-nbd error')) def test_inner_get_dev_qemu_timeout(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('os.path.exists', _fake_exists_no_users)) # We have a trycmd that always passed def fake_trycmd(*args, **kwargs): return '', '' self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd)) self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop)) # Error logged, no device consumed self.assertFalse(n._inner_get_dev()) self.assertTrue(n.error.endswith('did not show up')) def fake_exists_one(self, path): # We need the pid file for the device which is allocated to exist, but # only once it is allocated to us if path.startswith('/sys/block/nbd'): if path == '/sys/block/nbd1/pid': return False if path.endswith('pid'): return False return True return ORIG_EXISTS(path) def fake_trycmd_creates_pid(self, *args, **kwargs): def fake_exists_two(path): if path.startswith('/sys/block/nbd'): if path == '/sys/block/nbd0/pid': return True if path.endswith('pid'): return False return True return ORIG_EXISTS(path) self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists_two)) return '', '' def test_inner_get_dev_works(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop)) self.useFixture(fixtures.MonkeyPatch('os.path.exists', self.fake_exists_one)) self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', self.fake_trycmd_creates_pid)) self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop)) # No error logged, device consumed self.assertTrue(n._inner_get_dev()) self.assertTrue(n.linked) self.assertEqual('', n.error) self.assertEqual('/dev/nbd0', n.device) # Free n.unget_dev() self.assertFalse(n.linked) self.assertEqual('', n.error) self.assertIsNone(n.device) def test_unget_dev_simple(self): # This test is just checking we don't get an exception when we unget # something we don't have tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop)) n.unget_dev() def test_get_dev(self): tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop)) self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop)) self.useFixture(fixtures.MonkeyPatch('os.path.exists', self.fake_exists_one)) self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', self.fake_trycmd_creates_pid)) # No error logged, device consumed self.assertTrue(n.get_dev()) self.assertTrue(n.linked) self.assertEqual('', n.error) self.assertEqual('/dev/nbd0', 
n.device) # Free n.unget_dev() self.assertFalse(n.linked) self.assertEqual('', n.error) self.assertIsNone(n.device) def test_get_dev_timeout(self): # Always fail to get a device def fake_get_dev_fails(self): return False self.stubs.Set(nbd.NbdMount, '_inner_get_dev', fake_get_dev_fails) tempdir = self.useFixture(fixtures.TempDir()).path n = nbd.NbdMount(self.file, tempdir) self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop)) self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop)) self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop)) self.useFixture(fixtures.MonkeyPatch('os.path.exists', self.fake_exists_one)) self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', self.fake_trycmd_creates_pid)) self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.' 'MAX_DEVICE_WAIT'), -10)) # No error logged, device consumed self.assertFalse(n.get_dev()) def test_do_mount_need_to_specify_fs_type(self): # NOTE(mikal): Bug 1094373 saw a regression where we failed to # communicate a failed mount properly. def fake_trycmd(*args, **kwargs): return '', 'broken' self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd)) imgfile = tempfile.NamedTemporaryFile() self.addCleanup(imgfile.close) tempdir = self.useFixture(fixtures.TempDir()).path mount = nbd.NbdMount(imgfile.name, tempdir) def fake_returns_true(*args, **kwargs): return True mount.get_dev = fake_returns_true mount.map_dev = fake_returns_true self.assertFalse(mount.do_mount()) def test_device_creation_race(self): # Make sure that even if two threads create instances at the same time # they cannot choose the same nbd number (see bug 1207422) tempdir = self.useFixture(fixtures.TempDir()).path free_devices = _fake_detect_nbd_devices(None)[:] chosen_devices = [] def fake_find_unused(self): return os.path.join('/dev', free_devices[-1]) def delay_and_remove_device(*args, **kwargs): # Ensure that context switch happens before the device is marked # as used. This will cause a failure without nbd-allocation-lock # in place. time.sleep(0.1) # We always choose the top device in find_unused - remove it now. free_devices.pop() return '', '' def pid_exists(pidfile): return pidfile not in [os.path.join('/sys/block', dev, 'pid') for dev in free_devices] self.stubs.Set(nbd.NbdMount, '_allocate_nbd', fake_find_unused) self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', delay_and_remove_device)) self.useFixture(fixtures.MonkeyPatch('os.path.exists', pid_exists)) def get_a_device(): n = nbd.NbdMount(self.file, tempdir) n.get_dev() chosen_devices.append(n.device) thread1 = eventlet.spawn(get_a_device) thread2 = eventlet.spawn(get_a_device) thread1.wait() thread2.wait() self.assertEqual(2, len(chosen_devices)) self.assertNotEqual(chosen_devices[0], chosen_devices[1]) nova-13.1.4/nova/tests/unit/virt/disk/mount/test_block.py0000664000567000056710000000272613064447140024565 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace Hosting, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures from nova import test from nova.virt.disk.mount import block from nova.virt.image import model as imgmodel class LoopTestCase(test.NoDBTestCase): def setUp(self): super(LoopTestCase, self).setUp() device_path = '/dev/mapper/instances--instance-0000001_disk' self.image = imgmodel.LocalBlockImage(device_path) def test_get_dev(self): tempdir = self.useFixture(fixtures.TempDir()).path b = block.BlockMount(self.image, tempdir) self.assertTrue(b.get_dev()) self.assertTrue(b.linked) self.assertEqual(self.image.path, b.device) def test_unget_dev(self): tempdir = self.useFixture(fixtures.TempDir()).path b = block.BlockMount(self.image, tempdir) b.unget_dev() self.assertIsNone(b.device) self.assertFalse(b.linked) nova-13.1.4/nova/tests/unit/virt/disk/test_inject.py0000664000567000056710000002704613064447152023612 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import OrderedDict import os import fixtures from nova import exception from nova import test from nova.tests.unit.virt.disk.vfs import fakeguestfs from nova.virt.disk import api as diskapi from nova.virt.disk.vfs import guestfs as vfsguestfs from nova.virt.image import model as imgmodel class VirtDiskTest(test.NoDBTestCase): def setUp(self): super(VirtDiskTest, self).setUp() self.useFixture( fixtures.MonkeyPatch('nova.virt.disk.vfs.guestfs.guestfs', fakeguestfs)) self.file = imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_QCOW2) def test_inject_data(self): self.assertTrue(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_QCOW2))) self.assertTrue(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), mandatory=('files',))) self.assertTrue(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), key="mysshkey", mandatory=('key',))) os_name = os.name os.name = 'nt' # Cause password injection to fail self.assertRaises(exception.NovaException, diskapi.inject_data, imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), admin_password="p", mandatory=('admin_password',)) self.assertFalse(diskapi.inject_data( imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW), admin_password="p")) os.name = os_name self.assertFalse(diskapi.inject_data( imgmodel.LocalFileImage("/some/fail/file", imgmodel.FORMAT_RAW), key="mysshkey")) def test_inject_data_key(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() diskapi._inject_key_into_fs("mysshkey", vfs) self.assertIn("/root/.ssh", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh"], {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700}) self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"], {'isdir': False, 'content': "Hello World\n# The following ssh " + "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, 'mode': 0o600}) vfs.teardown() def test_inject_data_key_with_selinux(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() 
vfs.make_path("etc/selinux") vfs.make_path("etc/rc.d") diskapi._inject_key_into_fs("mysshkey", vfs) self.assertIn("/etc/rc.d/rc.local", vfs.handle.files) self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"], {'isdir': False, 'content': "Hello World#!/bin/sh\n# Added by " + "Nova to ensure injected ssh keys " + "have the right context\nrestorecon " + "-RF root/.ssh 2>/dev/null || :\n", 'gid': 100, 'uid': 100, 'mode': 0o700}) self.assertIn("/root/.ssh", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh"], {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700}) self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files) self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"], {'isdir': False, 'content': "Hello World\n# The following ssh " + "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, 'mode': 0o600}) vfs.teardown() def test_inject_data_key_with_selinux_append_with_newline(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done") vfs.make_path("etc/selinux") vfs.make_path("etc/rc.d") diskapi._inject_key_into_fs("mysshkey", vfs) self.assertIn("/etc/rc.d/rc.local", vfs.handle.files) self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"], {'isdir': False, 'content': "#!/bin/sh\necho done\n# Added " "by Nova to ensure injected ssh keys have " "the right context\nrestorecon -RF " "root/.ssh 2>/dev/null || :\n", 'gid': 100, 'uid': 100, 'mode': 0o700}) vfs.teardown() def test_inject_net(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() diskapi._inject_net_into_fs("mynetconfig", vfs) self.assertIn("/etc/network/interfaces", vfs.handle.files) self.assertEqual(vfs.handle.files["/etc/network/interfaces"], {'content': 'mynetconfig', 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) vfs.teardown() def test_inject_metadata(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() metadata = {"foo": "bar", "eek": "wizz"} metadata = OrderedDict(sorted(metadata.items())) diskapi._inject_metadata_into_fs(metadata, vfs) self.assertIn("/meta.js", vfs.handle.files) self.assertEqual({'content': '{"eek": "wizz", ' + '"foo": "bar"}', 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}, vfs.handle.files["/meta.js"]) vfs.teardown() def test_inject_admin_password(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() def fake_salt(): return "1234567890abcdef" self.stubs.Set(diskapi, '_generate_salt', fake_salt) vfs.handle.write("/etc/shadow", "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n") vfs.handle.write("/etc/passwd", "root:x:0:0:root:/root:/bin/bash\n" + "bin:x:1:1:bin:/bin:/sbin/nologin\n" + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n") diskapi._inject_admin_password_into_fs("123456", vfs) self.assertEqual(vfs.handle.files["/etc/passwd"], {'content': "root:x:0:0:root:/root:/bin/bash\n" + "bin:x:1:1:bin:/bin:/sbin/nologin\n" + "daemon:x:2:2:daemon:/sbin:" + "/sbin/nologin\n", 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) shadow = vfs.handle.files["/etc/shadow"] # if the encrypted password is only 13 characters long, then # nova.virt.disk.api:_set_password fell back to DES. 
if len(shadow['content']) == 91: self.assertEqual(shadow, {'content': "root:12tir.zIbWQ3c" + ":14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n", 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) else: self.assertEqual(shadow, {'content': "root:$1$12345678$a4ge4d5iJ5vw" + "vbFS88TEN0:14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n", 'gid': 100, 'isdir': False, 'mode': 0o700, 'uid': 100}) vfs.teardown() def test_inject_files_into_fs(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() diskapi._inject_files_into_fs([("/path/to/not/exists/file", "inject-file-contents")], vfs) self.assertIn("/path/to/not/exists", vfs.handle.files) shadow_dir = vfs.handle.files["/path/to/not/exists"] self.assertEqual(shadow_dir, {"isdir": True, "gid": 0, "uid": 0, "mode": 0o744}) shadow_file = vfs.handle.files["/path/to/not/exists/file"] self.assertEqual(shadow_file, {"isdir": False, "content": "inject-file-contents", "gid": 100, "uid": 100, "mode": 0o700}) vfs.teardown() def test_inject_files_into_fs_dir_exists(self): vfs = vfsguestfs.VFSGuestFS(self.file) vfs.setup() called = {'make_path': False} def fake_has_file(*args, **kwargs): return True def fake_make_path(*args, **kwargs): called['make_path'] = True self.stubs.Set(vfs, 'has_file', fake_has_file) self.stubs.Set(vfs, 'make_path', fake_make_path) # test for already exists dir diskapi._inject_files_into_fs([("/path/to/exists/file", "inject-file-contents")], vfs) self.assertIn("/path/to/exists/file", vfs.handle.files) self.assertFalse(called['make_path']) # test for root dir diskapi._inject_files_into_fs([("/inject-file", "inject-file-contents")], vfs) self.assertIn("/inject-file", vfs.handle.files) self.assertFalse(called['make_path']) # test for null dir vfs.handle.files.pop("/inject-file") diskapi._inject_files_into_fs([("inject-file", "inject-file-contents")], vfs) self.assertIn("/inject-file", vfs.handle.files) self.assertFalse(called['make_path']) vfs.teardown() nova-13.1.4/nova/tests/unit/virt/disk/vfs/0000775000567000056710000000000013064447471021516 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/disk/vfs/test_guestfs.py0000664000567000056710000002623613064447152024614 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures import mock from nova import exception from nova import test from nova.tests.unit.virt.disk.vfs import fakeguestfs from nova.virt.disk.vfs import guestfs as vfsimpl from nova.virt.image import model as imgmodel class VirtDiskVFSGuestFSTest(test.NoDBTestCase): def setUp(self): super(VirtDiskVFSGuestFSTest, self).setUp() self.useFixture( fixtures.MonkeyPatch('nova.virt.disk.vfs.guestfs.guestfs', fakeguestfs)) self.qcowfile = imgmodel.LocalFileImage("/dummy.qcow2", imgmodel.FORMAT_QCOW2) self.rawfile = imgmodel.LocalFileImage("/dummy.img", imgmodel.FORMAT_RAW) self.lvmfile = imgmodel.LocalBlockImage("/dev/volgroup/myvol") self.rbdfile = imgmodel.RBDImage("myvol", "mypool", "cthulu", "arrrrrgh", ["server1:123", "server2:123"]) def _do_test_appliance_setup_inspect(self, image, drives, forcetcg): if forcetcg: vfsimpl.force_tcg() else: vfsimpl.force_tcg(False) vfs = vfsimpl.VFSGuestFS( image, partition=-1) vfs.setup() if forcetcg: self.assertEqual("force_tcg", vfs.handle.backend_settings) vfsimpl.force_tcg(False) else: self.assertIsNone(vfs.handle.backend_settings) self.assertTrue(vfs.handle.running) self.assertEqual(drives, vfs.handle.drives) self.assertEqual(3, len(vfs.handle.mounts)) self.assertEqual("/dev/mapper/guestvgf-lv_root", vfs.handle.mounts[0][1]) self.assertEqual("/dev/vda1", vfs.handle.mounts[1][1]) self.assertEqual("/dev/mapper/guestvgf-lv_home", vfs.handle.mounts[2][1]) self.assertEqual("/", vfs.handle.mounts[0][2]) self.assertEqual("/boot", vfs.handle.mounts[1][2]) self.assertEqual("/home", vfs.handle.mounts[2][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) self.assertFalse(handle.running) self.assertTrue(handle.closed) self.assertEqual(0, len(handle.mounts)) def test_appliance_setup_inspect_auto(self): drives = [("/dummy.qcow2", {"format": "qcow2"})] self._do_test_appliance_setup_inspect(self.qcowfile, drives, False) def test_appliance_setup_inspect_tcg(self): drives = [("/dummy.qcow2", {"format": "qcow2"})] self._do_test_appliance_setup_inspect(self.qcowfile, drives, True) def test_appliance_setup_inspect_raw(self): drives = [("/dummy.img", {"format": "raw"})] self._do_test_appliance_setup_inspect(self.rawfile, drives, True) def test_appliance_setup_inspect_lvm(self): drives = [("/dev/volgroup/myvol", {"format": "raw"})] self._do_test_appliance_setup_inspect(self.lvmfile, drives, True) def test_appliance_setup_inspect_rbd(self): drives = [("mypool/myvol", {"format": "raw", "protocol": "rbd", "username": "cthulu", "secret": "arrrrrgh", "server": ["server1:123", "server2:123"]})] self._do_test_appliance_setup_inspect(self.rbdfile, drives, True) def test_appliance_setup_inspect_no_root_raises(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=-1) # call setup to init the handle so we can stub it vfs.setup() self.assertIsNone(vfs.handle.backend_settings) def fake_inspect_os(): return [] self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os) self.assertRaises(exception.NovaException, vfs.setup_os_inspect) def test_appliance_setup_inspect_multi_boots_raises(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=-1) # call setup to init the handle so we can stub it vfs.setup() self.assertIsNone(vfs.handle.backend_settings) def fake_inspect_os(): return ['fake1', 'fake2'] self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os) self.assertRaises(exception.NovaException, vfs.setup_os_inspect) def test_appliance_setup_static_nopart(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=None) vfs.setup() 
self.assertIsNone(vfs.handle.backend_settings) self.assertTrue(vfs.handle.running) self.assertEqual(1, len(vfs.handle.mounts)) self.assertEqual("/dev/sda", vfs.handle.mounts[0][1]) self.assertEqual("/", vfs.handle.mounts[0][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) self.assertFalse(handle.running) self.assertTrue(handle.closed) self.assertEqual(0, len(handle.mounts)) def test_appliance_setup_static_part(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile, partition=2) vfs.setup() self.assertIsNone(vfs.handle.backend_settings) self.assertTrue(vfs.handle.running) self.assertEqual(1, len(vfs.handle.mounts)) self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1]) self.assertEqual("/", vfs.handle.mounts[0][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) self.assertFalse(handle.running) self.assertTrue(handle.closed) self.assertEqual(0, len(handle.mounts)) def test_makepath(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.make_path("/some/dir") vfs.make_path("/other/dir") self.assertIn("/some/dir", vfs.handle.files) self.assertIn("/other/dir", vfs.handle.files) self.assertTrue(vfs.handle.files["/some/dir"]["isdir"]) self.assertTrue(vfs.handle.files["/other/dir"]["isdir"]) vfs.teardown() def test_append_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.append_file("/some/file", " Goodbye") self.assertIn("/some/file", vfs.handle.files) self.assertEqual("Hello World Goodbye", vfs.handle.files["/some/file"]["content"]) vfs.teardown() def test_replace_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.replace_file("/some/file", "Goodbye") self.assertIn("/some/file", vfs.handle.files) self.assertEqual("Goodbye", vfs.handle.files["/some/file"]["content"]) vfs.teardown() def test_read_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertEqual("Hello World", vfs.read_file("/some/file")) vfs.teardown() def test_has_file(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.read_file("/some/file") self.assertTrue(vfs.has_file("/some/file")) self.assertFalse(vfs.has_file("/other/file")) vfs.teardown() def test_set_permissions(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.read_file("/some/file") self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"]) vfs.set_permissions("/some/file", 0o7777) self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"]) vfs.teardown() def test_set_ownership(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() vfs.read_file("/some/file") self.assertEqual(100, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(100, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", "fred", None) self.assertEqual(105, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(100, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", None, "users") self.assertEqual(105, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(500, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", "joe", "admins") self.assertEqual(110, vfs.handle.files["/some/file"]["uid"]) self.assertEqual(600, vfs.handle.files["/some/file"]["gid"]) vfs.teardown() def test_close_on_error(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertFalse(vfs.handle.kwargs['close_on_exit']) vfs.teardown() self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_CLOSE_ON_EXIT', False) vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertNotIn('close_on_exit', vfs.handle.kwargs) vfs.teardown() def 
test_python_return_dict(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertFalse(vfs.handle.kwargs['python_return_dict']) vfs.teardown() self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False) vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertNotIn('python_return_dict', vfs.handle.kwargs) vfs.teardown() def test_setup_debug_disable(self): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertFalse(vfs.handle.trace_enabled) self.assertFalse(vfs.handle.verbose_enabled) self.assertIsNone(vfs.handle.event_callback) def test_setup_debug_enabled(self): self.flags(debug=True, group='guestfs') vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertTrue(vfs.handle.trace_enabled) self.assertTrue(vfs.handle.verbose_enabled) self.assertIsNotNone(vfs.handle.event_callback) def test_get_format_fs(self): vfs = vfsimpl.VFSGuestFS(self.rawfile) vfs.setup() self.assertIsNotNone(vfs.handle) self.assertEqual('ext3', vfs.get_image_fs()) vfs.teardown() @mock.patch.object(vfsimpl.VFSGuestFS, 'setup_os') def test_setup_mount(self, setup_os): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup() self.assertTrue(setup_os.called) @mock.patch.object(vfsimpl.VFSGuestFS, 'setup_os') def test_setup_mount_false(self, setup_os): vfs = vfsimpl.VFSGuestFS(self.qcowfile) vfs.setup(mount=False) self.assertFalse(setup_os.called) nova-13.1.4/nova/tests/unit/virt/disk/vfs/__init__.py0000664000567000056710000000000013064447140023606 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/disk/vfs/test_localfs.py0000664000567000056710000004336713064447152024563 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import tempfile import mock from oslo_concurrency import processutils from nova import exception from nova import test from nova.tests.unit import utils as tests_utils import nova.utils from nova.virt.disk.mount import nbd from nova.virt.disk.vfs import localfs as vfsimpl from nova.virt.image import model as imgmodel dirs = [] files = {} commands = [] def fake_execute(*args, **kwargs): commands.append({"args": args, "kwargs": kwargs}) if args[0] == "readlink": if args[1] == "-nm": if args[2] in ["/scratch/dir/some/file", "/scratch/dir/some/dir", "/scratch/dir/other/dir", "/scratch/dir/other/file"]: return args[2], "" elif args[1] == "-e": if args[2] in files: return args[2], "" return "", "No such file" elif args[0] == "mkdir": dirs.append(args[2]) elif args[0] == "chown": owner = args[1] path = args[2] if path not in files: raise Exception("No such file: " + path) sep = owner.find(':') if sep != -1: user = owner[0:sep] group = owner[sep + 1:] else: user = owner group = None if user: if user == "fred": uid = 105 else: uid = 110 files[path]["uid"] = uid if group: if group == "users": gid = 500 else: gid = 600 files[path]["gid"] = gid elif args[0] == "chgrp": group = args[1] path = args[2] if path not in files: raise Exception("No such file: " + path) if group == "users": gid = 500 else: gid = 600 files[path]["gid"] = gid elif args[0] == "chmod": mode = args[1] path = args[2] if path not in files: raise Exception("No such file: " + path) files[path]["mode"] = int(mode, 8) elif args[0] == "cat": path = args[1] if path not in files: files[path] = { "content": "Hello World", "gid": 100, "uid": 100, "mode": 0o700 } return files[path]["content"], "" elif args[0] == "tee": if args[1] == "-a": path = args[2] append = True else: path = args[1] append = False if path not in files: files[path] = { "content": "Hello World", "gid": 100, "uid": 100, "mode": 0o700, } if append: files[path]["content"] += kwargs["process_input"] else: files[path]["content"] = kwargs["process_input"] class VirtDiskVFSLocalFSTestPaths(test.NoDBTestCase): def setUp(self): super(VirtDiskVFSLocalFSTestPaths, self).setUp() real_execute = processutils.execute def nonroot_execute(*cmd_parts, **kwargs): kwargs.pop('run_as_root', None) return real_execute(*cmd_parts, **kwargs) self.stubs.Set(processutils, 'execute', nonroot_execute) self.rawfile = imgmodel.LocalFileImage("/dummy.img", imgmodel.FORMAT_RAW) def test_check_safe_path(self): if not tests_utils.coreutils_readlink_available(): self.skipTest("coreutils readlink(1) unavailable") vfs = vfsimpl.VFSLocalFS(self.rawfile) vfs.imgdir = "/foo" ret = vfs._canonical_path('etc/something.conf') self.assertEqual(ret, '/foo/etc/something.conf') def test_check_unsafe_path(self): if not tests_utils.coreutils_readlink_available(): self.skipTest("coreutils readlink(1) unavailable") vfs = vfsimpl.VFSLocalFS(self.rawfile) vfs.imgdir = "/foo" self.assertRaises(exception.Invalid, vfs._canonical_path, 'etc/../../../something.conf') class VirtDiskVFSLocalFSTest(test.NoDBTestCase): def setUp(self): super(VirtDiskVFSLocalFSTest, self).setUp() self.qcowfile = imgmodel.LocalFileImage("/dummy.qcow2", imgmodel.FORMAT_QCOW2) self.rawfile = imgmodel.LocalFileImage("/dummy.img", imgmodel.FORMAT_RAW) def test_makepath(self): global dirs, commands dirs = [] commands = [] self.stubs.Set(processutils, 'execute', fake_execute) vfs = vfsimpl.VFSLocalFS(self.qcowfile) vfs.imgdir = "/scratch/dir" vfs.make_path("/some/dir") vfs.make_path("/other/dir") self.assertEqual(dirs, ["/scratch/dir/some/dir", 
"/scratch/dir/other/dir"]), root_helper = nova.utils.get_root_helper() self.assertEqual(commands, [{'args': ('readlink', '-nm', '/scratch/dir/some/dir'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('mkdir', '-p', '/scratch/dir/some/dir'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-nm', '/scratch/dir/other/dir'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('mkdir', '-p', '/scratch/dir/other/dir'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}]) def test_append_file(self): global files, commands files = {} commands = [] self.stubs.Set(processutils, 'execute', fake_execute) vfs = vfsimpl.VFSLocalFS(self.qcowfile) vfs.imgdir = "/scratch/dir" vfs.append_file("/some/file", " Goodbye") self.assertIn("/scratch/dir/some/file", files) self.assertEqual(files["/scratch/dir/some/file"]["content"], "Hello World Goodbye") root_helper = nova.utils.get_root_helper() self.assertEqual(commands, [{'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('tee', '-a', '/scratch/dir/some/file'), 'kwargs': {'process_input': ' Goodbye', 'run_as_root': True, 'root_helper': root_helper}}]) def test_replace_file(self): global files, commands files = {} commands = [] self.stubs.Set(processutils, 'execute', fake_execute) vfs = vfsimpl.VFSLocalFS(self.qcowfile) vfs.imgdir = "/scratch/dir" vfs.replace_file("/some/file", "Goodbye") self.assertIn("/scratch/dir/some/file", files) self.assertEqual(files["/scratch/dir/some/file"]["content"], "Goodbye") root_helper = nova.utils.get_root_helper() self.assertEqual(commands, [{'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('tee', '/scratch/dir/some/file'), 'kwargs': {'process_input': 'Goodbye', 'run_as_root': True, 'root_helper': root_helper}}]) def test_read_file(self): global commands, files files = {} commands = [] self.stubs.Set(processutils, 'execute', fake_execute) vfs = vfsimpl.VFSLocalFS(self.qcowfile) vfs.imgdir = "/scratch/dir" self.assertEqual(vfs.read_file("/some/file"), "Hello World") root_helper = nova.utils.get_root_helper() self.assertEqual(commands, [{'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('cat', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}]) def test_has_file(self): global commands, files files = {} commands = [] self.stubs.Set(processutils, 'execute', fake_execute) vfs = vfsimpl.VFSLocalFS(self.qcowfile) vfs.imgdir = "/scratch/dir" vfs.read_file("/some/file") self.assertTrue(vfs.has_file("/some/file")) self.assertFalse(vfs.has_file("/other/file")) root_helper = nova.utils.get_root_helper() self.assertEqual(commands, [{'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('cat', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-e', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-nm', '/scratch/dir/other/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-e', '/scratch/dir/other/file'), 'kwargs': {'run_as_root': True, 
'root_helper': root_helper}}, ]) def test_set_permissions(self): global commands, files commands = [] files = {} self.stubs.Set(processutils, 'execute', fake_execute) vfs = vfsimpl.VFSLocalFS(self.qcowfile) vfs.imgdir = "/scratch/dir" vfs.read_file("/some/file") vfs.set_permissions("/some/file", 0o777) self.assertEqual(files["/scratch/dir/some/file"]["mode"], 0o777) root_helper = nova.utils.get_root_helper() self.assertEqual(commands, [{'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('cat', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('chmod', '777', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}]) def test_set_ownership(self): global commands, files commands = [] files = {} self.stubs.Set(processutils, 'execute', fake_execute) vfs = vfsimpl.VFSLocalFS(self.qcowfile) vfs.imgdir = "/scratch/dir" vfs.read_file("/some/file") self.assertEqual(files["/scratch/dir/some/file"]["uid"], 100) self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100) vfs.set_ownership("/some/file", "fred", None) self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105) self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100) vfs.set_ownership("/some/file", None, "users") self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105) self.assertEqual(files["/scratch/dir/some/file"]["gid"], 500) vfs.set_ownership("/some/file", "joe", "admins") self.assertEqual(files["/scratch/dir/some/file"]["uid"], 110) self.assertEqual(files["/scratch/dir/some/file"]["gid"], 600) root_helper = nova.utils.get_root_helper() self.assertEqual(commands, [{'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('cat', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('chown', 'fred', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('chgrp', 'users', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('readlink', '-nm', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}, {'args': ('chown', 'joe:admins', '/scratch/dir/some/file'), 'kwargs': {'run_as_root': True, 'root_helper': root_helper}}]) @mock.patch.object(nova.utils, 'execute') def test_get_format_fs(self, execute): vfs = vfsimpl.VFSLocalFS(self.rawfile) vfs.setup = mock.MagicMock() vfs.teardown = mock.MagicMock() def fake_setup(): vfs.mount = mock.MagicMock() vfs.mount.device = None vfs.mount.get_dev.side_effect = fake_get_dev def fake_teardown(): vfs.mount.device = None def fake_get_dev(): vfs.mount.device = '/dev/xyz' return True vfs.setup.side_effect = fake_setup vfs.teardown.side_effect = fake_teardown execute.return_value = ('ext3\n', '') vfs.setup() self.assertEqual('ext3', vfs.get_image_fs()) vfs.teardown() vfs.mount.get_dev.assert_called_once_with() execute.assert_called_once_with('blkid', '-o', 'value', '-s', 'TYPE', '/dev/xyz', run_as_root=True, check_exit_code=[0, 2]) 
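The two tests that follow pin down what VFSLocalFS.setup() is expected to do for a qcow2 image: create a fresh temporary directory, wrap the image in an NbdMount, and call do_mount() only when mount=True. A simplified, hedged sketch of that behaviour (the real method also handles raw images via loop mounts and raises on mount failure; setup_sketch is an invented name, not the actual implementation):

# Simplified sketch of the behaviour asserted by test_setup_mount and
# test_setup_mount_false below; error handling and the raw/loop path omitted.
import tempfile
from nova.virt.disk.mount import nbd

def setup_sketch(vfs, mount=True):
    vfs.imgdir = tempfile.mkdtemp()
    vfs.mount = nbd.NbdMount(vfs.image, vfs.imgdir, vfs.partition)
    if mount:
        vfs.mount.do_mount()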
@mock.patch.object(tempfile, 'mkdtemp') @mock.patch.object(nbd, 'NbdMount') def test_setup_mount(self, NbdMount, mkdtemp): vfs = vfsimpl.VFSLocalFS(self.qcowfile) mounter = mock.MagicMock() mkdtemp.return_value = 'tmp/' NbdMount.return_value = mounter vfs.setup() self.assertTrue(mkdtemp.called) NbdMount.assert_called_once_with(self.qcowfile, "tmp/", None) mounter.do_mount.assert_called_once_with() @mock.patch.object(tempfile, 'mkdtemp') @mock.patch.object(nbd, 'NbdMount') def test_setup_mount_false(self, NbdMount, mkdtemp): vfs = vfsimpl.VFSLocalFS(self.qcowfile) mounter = mock.MagicMock() mkdtemp.return_value = 'tmp/' NbdMount.return_value = mounter vfs.setup(mount=False) self.assertTrue(mkdtemp.called) NbdMount.assert_called_once_with(self.qcowfile, "tmp/", None) self.assertFalse(mounter.do_mount.called) nova-13.1.4/nova/tests/unit/virt/disk/vfs/fakeguestfs.py0000664000567000056710000001322213064447152024373 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. EVENT_APPLIANCE = 0x1 EVENT_LIBRARY = 0x2 EVENT_WARNING = 0x3 EVENT_TRACE = 0x4 class GuestFS(object): SUPPORT_CLOSE_ON_EXIT = True SUPPORT_RETURN_DICT = True def __init__(self, **kwargs): if not self.SUPPORT_CLOSE_ON_EXIT and 'close_on_exit' in kwargs: raise TypeError('close_on_exit') if not self.SUPPORT_RETURN_DICT and 'python_return_dict' in kwargs: raise TypeError('python_return_dict') self._python_return_dict = kwargs.get('python_return_dict', False) self.kwargs = kwargs self.drives = [] self.running = False self.closed = False self.mounts = [] self.files = {} self.auginit = False self.root_mounted = False self.backend_settings = None self.trace_enabled = False self.verbose_enabled = False self.event_callback = None def launch(self): self.running = True def shutdown(self): self.running = False self.mounts = [] self.drives = [] def set_backend_settings(self, settings): self.backend_settings = settings def close(self): self.closed = True def add_drive_opts(self, file, *args, **kwargs): if file == "/some/fail/file": raise RuntimeError("%s: No such file or directory", file) self.drives.append((file, kwargs)) def add_drive(self, file, format=None, *args, **kwargs): self.add_drive_opts(file, format=None, *args, **kwargs) def inspect_os(self): return ["/dev/guestvgf/lv_root"] def inspect_get_mountpoints(self, dev): mountpoints = [("/home", "/dev/mapper/guestvgf-lv_home"), ("/", "/dev/mapper/guestvgf-lv_root"), ("/boot", "/dev/vda1")] if self.SUPPORT_RETURN_DICT and self._python_return_dict: return dict(mountpoints) else: return mountpoints def mount_options(self, options, device, mntpoint): if mntpoint == "/": self.root_mounted = True else: if not self.root_mounted: raise RuntimeError( "mount: %s: No such file or directory" % mntpoint) self.mounts.append((options, device, mntpoint)) def mkdir_p(self, path): if path not in self.files: self.files[path] = { "isdir": True, "gid": 100, "uid": 100, "mode": 0o700 } def read_file(self, path): if path not in self.files: self.files[path] = { "isdir": 
False, "content": "Hello World", "gid": 100, "uid": 100, "mode": 0o700 } return self.files[path]["content"] def write(self, path, content): if path not in self.files: self.files[path] = { "isdir": False, "content": "Hello World", "gid": 100, "uid": 100, "mode": 0o700 } self.files[path]["content"] = content def write_append(self, path, content): if path not in self.files: self.files[path] = { "isdir": False, "content": "Hello World", "gid": 100, "uid": 100, "mode": 0o700 } self.files[path]["content"] = self.files[path]["content"] + content def stat(self, path): if path not in self.files: raise RuntimeError("No such file: " + path) return self.files[path]["mode"] def chown(self, uid, gid, path): if path not in self.files: raise RuntimeError("No such file: " + path) if uid != -1: self.files[path]["uid"] = uid if gid != -1: self.files[path]["gid"] = gid def chmod(self, mode, path): if path not in self.files: raise RuntimeError("No such file: " + path) self.files[path]["mode"] = mode def aug_init(self, root, flags): self.auginit = True def aug_close(self): self.auginit = False def aug_get(self, cfgpath): if not self.auginit: raise RuntimeError("Augeus not initialized") if cfgpath == "/files/etc/passwd/root/uid": return 0 elif cfgpath == "/files/etc/passwd/fred/uid": return 105 elif cfgpath == "/files/etc/passwd/joe/uid": return 110 elif cfgpath == "/files/etc/group/root/gid": return 0 elif cfgpath == "/files/etc/group/users/gid": return 500 elif cfgpath == "/files/etc/group/admins/gid": return 600 raise RuntimeError("Unknown path %s", cfgpath) def set_trace(self, enabled): self.trace_enabled = enabled def set_verbose(self, enabled): self.verbose_enabled = enabled def set_event_callback(self, func, events): self.event_callback = (func, events) def vfs_type(self, dev): return 'ext3' nova-13.1.4/nova/tests/unit/virt/__init__.py0000664000567000056710000000000013064447140022056 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/test_volumeutils.py0000664000567000056710000000273213064447141023765 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # Copyright 2012 University Of Minho # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for virt volumeutils. 
""" import mock from os_brick.initiator import connector from nova import test from nova.virt import volumeutils class VolumeUtilsTestCase(test.NoDBTestCase): @mock.patch.object(connector.ISCSIConnector, 'get_initiator', return_value='fake.initiator.iqn') def test_get_iscsi_initiator(self, fake_initiator): initiator = 'fake.initiator.iqn' # Start test result = volumeutils.get_iscsi_initiator() self.assertEqual(initiator, result) @mock.patch.object(connector.ISCSIConnector, 'get_initiator', return_value=None) def test_get_missing_iscsi_initiator(self, fake_initiator): result = volumeutils.get_iscsi_initiator() self.assertIsNone(result) nova-13.1.4/nova/tests/unit/virt/vmwareapi/0000775000567000056710000000000013064447471021761 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/vmwareapi/ovf.xml0000664000567000056710000000704413064447141023274 0ustar jenkinsjenkins00000000000000 Virtual disk information The list of logical networks The VM Network network A virtual machine Damn Small Linux The kind of installed guest operating system Other Linux (32-bit) Virtual hardware requirements Virtual Hardware Family 0 Damn Small Linux vmx-07 hertz * 10^6 Number of Virtual CPUs 1 virtual CPU(s) 1 3 1 byte * 2^20 Memory Size 256MB of memory 2 4 256 7 true VM Network Spinderman network Network adapter 1 6 PCNet32 10 0 Hard disk 1 ovf:/disk/vmdisk1 8 4 17 A human-readable annotation Paiadzhina fostata boklici nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_configdrive.py0000664000567000056710000001632613064447152025675 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures import mock from mox3 import mox from nova import context from nova.image import glance from nova import objects from nova import test from nova.tests.unit import fake_instance import nova.tests.unit.image.fake from nova.tests.unit import utils from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake from nova.tests.unit.virt.vmwareapi import stubs from nova.tests import uuidsentinel from nova.virt import fake from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import vmops class ConfigDriveTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension') def setUp(self, mock_register): super(ConfigDriveTestCase, self).setUp() vm_util.vm_refs_cache_reset() self.context = context.RequestContext('fake', 'fake', is_admin=False) self.flags(cluster_name='test_cluster', host_ip='test_url', host_username='test_username', host_password='test_pass', use_linked_clone=False, group='vmware') self.flags(enabled=False, group='vnc') vmwareapi_fake.reset() stubs.set_stubs(self) nova.tests.unit.image.fake.stub_out_image_service(self) self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI) self.network_info = utils.get_test_network_info() self.node_name = self.conn._nodename image_ref = nova.tests.unit.image.fake.get_valid_image_id() instance_values = { 'vm_state': 'building', 'project_id': 'fake', 'user_id': 'fake', 'name': '1', 'kernel_id': '1', 'ramdisk_id': '1', 'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}], 'memory_mb': 8192, 'flavor': objects.Flavor(vcpus=4, extra_specs={}), 'instance_type_id': 0, 'vcpus': 4, 'root_gb': 80, 'image_ref': image_ref, 'host': 'fake_host', 'task_state': 'scheduling', 'reservation_id': 'r-3t8muvr0', 'id': 1, 'uuid': uuidsentinel.foo, 'node': self.node_name, 'metadata': [], 'expected_attrs': ['system_metadata'], } self.test_instance = fake_instance.fake_instance_obj(self.context, **instance_values) self.test_instance.flavor = objects.Flavor(vcpus=4, memory_mb=8192, ephemeral_gb=0, swap=0, extra_specs={}) (image_service, image_id) = glance.get_remote_image_service(context, image_ref) metadata = image_service.show(context, image_id) self.image = objects.ImageMeta.from_dict({ 'id': image_ref, 'disk_format': 'vmdk', 'size': int(metadata['size']), }) class FakeInstanceMetadata(object): def __init__(self, instance, content=None, extra_md=None, network_info=None): pass def metadata_for_config_drive(self): return [] self.useFixture(fixtures.MonkeyPatch( 'nova.api.metadata.base.InstanceMetadata', FakeInstanceMetadata)) def fake_make_drive(_self, _path): pass # We can't actually make a config drive v2 because ensure_tree has # been faked out self.stub_out('nova.virt.configdrive.ConfigDriveBuilder.make_drive', fake_make_drive) def fake_upload_iso_to_datastore(iso_path, instance, **kwargs): pass self.stub_out('nova.virt.vmwareapi.images.upload_iso_to_datastore', fake_upload_iso_to_datastore) def tearDown(self): super(ConfigDriveTestCase, self).tearDown() vmwareapi_fake.cleanup() nova.tests.unit.image.fake.FakeImageService_reset() @mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata', return_value='fake_metadata') def _spawn_vm(self, fake_get_instance_meta, injected_files=None, admin_password=None, block_device_info=None): injected_files = injected_files or [] self.conn.spawn(self.context, self.test_instance, self.image, injected_files=injected_files, admin_password=admin_password, network_info=self.network_info, 
block_device_info=block_device_info) def test_create_vm_with_config_drive_verify_method_invocation(self): self.test_instance.config_drive = 'True' self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive') self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm') self.conn._vmops._create_config_drive(self.test_instance, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg() ).AndReturn('[ds1] fake.iso') self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() # if spawn does not call the _create_config_drive or # _attach_cdrom_to_vm call with the correct set of parameters # then mox's VerifyAll will throw a Expected methods never called # Exception self._spawn_vm() def test_create_vm_without_config_drive(self): self.test_instance.config_drive = None self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive') self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm') self.mox.ReplayAll() # if spawn ends up calling _create_config_drive or # _attach_cdrom_to_vm then mox will log a Unexpected method call # exception self._spawn_vm() def test_create_vm_with_config_drive(self): self.test_instance.config_drive = 'True' self._spawn_vm() nova-13.1.4/nova/tests/unit/virt/vmwareapi/stubs.py0000664000567000056710000000502313064447152023467 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Stubouts for the test suite """ from oslo_vmware import exceptions as vexc from nova.tests.unit.virt.vmwareapi import fake def fake_get_vim_object(arg): """Stubs out the VMwareAPISession's get_vim_object method.""" return fake.FakeVim() @property def fake_vim_prop(arg): """Stubs out the VMwareAPISession's vim property access method.""" return fake.get_fake_vim_object(arg) def fake_is_vim_object(arg, module): """Stubs out the VMwareAPISession's is_vim_object method.""" return isinstance(module, fake.FakeVim) def fake_temp_method_exception(): raise vexc.VimFaultException( [vexc.NOT_AUTHENTICATED], "Session Empty/Not Authenticated") def fake_temp_session_exception(): raise vexc.VimConnectionException("it's a fake!", "Session Exception") def fake_session_file_exception(): fault_list = [vexc.FILE_ALREADY_EXISTS] raise vexc.VimFaultException(fault_list, Exception('fake')) def fake_session_permission_exception(): fault_list = [vexc.NO_PERMISSION] fault_string = 'Permission to perform this operation was denied.' 
details = {'privilegeId': 'Resource.AssignVMToPool', 'object': 'domain-c7'} raise vexc.VimFaultException(fault_list, fault_string, details=details) def set_stubs(test): """Set the stubs.""" test.stub_out('nova.virt.vmwareapi.network_util.get_network_with_the_name', fake.fake_get_network) test.stub_out('nova.virt.vmwareapi.images.upload_image_stream_optimized', fake.fake_upload_image) test.stub_out('nova.virt.vmwareapi.images.fetch_image', fake.fake_fetch_image) test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim', fake_vim_prop) test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession._is_vim_object', fake_is_vim_object) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_network_util.py0000664000567000056710000002174713064447152026127 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from oslo_vmware import vim_util from nova import exception from nova import test from nova.tests.unit.virt.vmwareapi import fake from nova.tests.unit.virt.vmwareapi import stubs from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import network_util from nova.virt.vmwareapi import vm_util ResultSet = collections.namedtuple('ResultSet', ['objects']) ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet']) DynamicProperty = collections.namedtuple('DynamicProperty', ['name', 'val']) class GetNetworkWithTheNameTestCase(test.NoDBTestCase): def setUp(self): super(GetNetworkWithTheNameTestCase, self).setUp() fake.reset() self.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim', stubs.fake_vim_prop) self.stub_out('nova.virt.vmwareapi.driver.' 'VMwareAPISession.is_vim_object', stubs.fake_is_vim_object) self._session = driver.VMwareAPISession() def _build_cluster_networks(self, networks): """Returns a set of results for a cluster network lookup. 
This is an example: (ObjectContent){ obj = (obj){ value = "domain-c7" _type = "ClusterComputeResource" } propSet[] = (DynamicProperty){ name = "network" val = (ArrayOfManagedObjectReference){ ManagedObjectReference[] = (ManagedObjectReference){ value = "network-54" _type = "Network" }, (ManagedObjectReference){ value = "dvportgroup-14" _type = "DistributedVirtualPortgroup" }, } }, }] """ objects = [] obj = ObjectContent(obj=vim_util.get_moref("domain-c7", "ClusterComputeResource"), propSet=[]) value = fake.DataObject() value.ManagedObjectReference = [] for network in networks: value.ManagedObjectReference.append(network) obj.propSet.append( DynamicProperty(name='network', val=value)) objects.append(obj) return ResultSet(objects=objects) def test_get_network_no_match(self): net_morefs = [vim_util.get_moref("dvportgroup-135", "DistributedVirtualPortgroup"), vim_util.get_moref("dvportgroup-136", "DistributedVirtualPortgroup")] networks = self._build_cluster_networks(net_morefs) self._continue_retrieval_called = False def mock_call_method(module, method, *args, **kwargs): if method == 'get_object_properties': return networks if method == 'get_object_property': result = fake.DataObject() result.name = 'no-match' return result if method == 'continue_retrieval': self._continue_retrieval_called = True with mock.patch.object(self._session, '_call_method', mock_call_method): res = network_util.get_network_with_the_name(self._session, 'fake_net', 'fake_cluster') self.assertTrue(self._continue_retrieval_called) self.assertIsNone(res) def _get_network_dvs_match(self, name, token=False): net_morefs = [vim_util.get_moref("dvportgroup-135", "DistributedVirtualPortgroup")] networks = self._build_cluster_networks(net_morefs) def mock_call_method(module, method, *args, **kwargs): if method == 'get_object_properties': return networks if method == 'get_object_property': result = fake.DataObject() if not token or self._continue_retrieval_called: result.name = name else: result.name = 'fake_name' result.key = 'fake_key' result.distributedVirtualSwitch = 'fake_dvs' return result if method == 'continue_retrieval': if token: self._continue_retrieval_called = True return networks if method == 'cancel_retrieval': self._cancel_retrieval_called = True with mock.patch.object(self._session, '_call_method', mock_call_method): res = network_util.get_network_with_the_name(self._session, 'fake_net', 'fake_cluster') self.assertIsNotNone(res) def test_get_network_dvs_exact_match(self): self._cancel_retrieval_called = False self._get_network_dvs_match('fake_net') self.assertTrue(self._cancel_retrieval_called) def test_get_network_dvs_match(self): self._cancel_retrieval_called = False self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net') self.assertTrue(self._cancel_retrieval_called) def test_get_network_dvs_match_with_token(self): self._continue_retrieval_called = False self._cancel_retrieval_called = False self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net', token=True) self.assertTrue(self._continue_retrieval_called) self.assertTrue(self._cancel_retrieval_called) def test_get_network_network_match(self): net_morefs = [vim_util.get_moref("network-54", "Network")] networks = self._build_cluster_networks(net_morefs) def mock_call_method(module, method, *args, **kwargs): if method == 'get_object_properties': return networks if method == 'get_object_property': return 'fake_net' with mock.patch.object(self._session, '_call_method', mock_call_method): res = network_util.get_network_with_the_name(self._session, 
'fake_net', 'fake_cluster') self.assertIsNotNone(res) class GetVlanIdAndVswitchForPortgroupTestCase(test.NoDBTestCase): @mock.patch.object(vm_util, 'get_host_ref') def test_no_port_groups(self, mock_get_host_ref): session = mock.Mock() session._call_method.return_value = None self.assertRaises( exception.NovaException, network_util.get_vlanid_and_vswitch_for_portgroup, session, 'port_group_name', 'fake_cluster' ) @mock.patch.object(vm_util, 'get_host_ref') def test_valid_port_group(self, mock_get_host_ref): session = mock.Mock() session._call_method.return_value = self._fake_port_groups() vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup( session, 'port_group_name', 'fake_cluster' ) self.assertEqual(vlanid, 100) self.assertEqual(vswitch, 'vswitch_name') @mock.patch.object(vm_util, 'get_host_ref') def test_unknown_port_group(self, mock_get_host_ref): session = mock.Mock() session._call_method.return_value = self._fake_port_groups() vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup( session, 'unknown_port_group', 'fake_cluster' ) self.assertIsNone(vlanid) self.assertIsNone(vswitch) def _fake_port_groups(self): port_group_spec = fake.DataObject() port_group_spec.name = 'port_group_name' port_group_spec.vlanId = 100 port_group = fake.DataObject() port_group.vswitch = 'vswitch_name' port_group.spec = port_group_spec response = fake.DataObject() response.HostPortGroup = [port_group] return response nova-13.1.4/nova/tests/unit/virt/vmwareapi/__init__.py0000664000567000056710000000000013064447141024052 0ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_vim_util.py0000664000567000056710000000300313064447141025210 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import test from nova.tests.unit.virt.vmwareapi import fake from nova.virt.vmwareapi import vim_util class VMwareVIMUtilTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVIMUtilTestCase, self).setUp() fake.reset() self.vim = fake.FakeVim() self.vim._login() def test_get_inner_objects(self): property = ['summary.name'] # Get the fake datastores directly from the cluster cluster_refs = fake._get_object_refs('ClusterComputeResource') cluster = fake._get_object(cluster_refs[0]) expected_ds = cluster.datastore.ManagedObjectReference # Get the fake datastores using inner objects utility method result = vim_util.get_inner_objects( self.vim, cluster_refs[0], 'datastore', 'Datastore', property) datastores = [oc.obj for oc in result.objects] self.assertEqual(expected_ds, datastores) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_images.py0000664000567000056710000003550113064447152024637 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suite for images. """ import os import tarfile import mock from oslo_utils import units from oslo_vmware import rw_handles from nova import exception from nova import objects from nova import test import nova.tests.unit.image.fake from nova.tests import uuidsentinel from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import images from nova.virt.vmwareapi import vm_util class VMwareImagesTestCase(test.NoDBTestCase): """Unit tests for Vmware API connection calls.""" def test_fetch_image(self): """Test fetching images.""" dc_name = 'fake-dc' file_path = 'fake_file' ds_name = 'ds1' host = mock.MagicMock() port = 7443 context = mock.MagicMock() image_data = { 'id': nova.tests.unit.image.fake.get_valid_image_id(), 'disk_format': 'vmdk', 'size': 512, } read_file_handle = mock.MagicMock() write_file_handle = mock.MagicMock() read_iter = mock.MagicMock() instance = objects.Instance(id=1, uuid=uuidsentinel.foo, image_ref=image_data['id']) def fake_read_handle(read_iter): return read_file_handle def fake_write_handle(host, port, dc_name, ds_name, cookies, file_path, file_size): return write_file_handle with test.nested( mock.patch.object(rw_handles, 'ImageReadHandle', side_effect=fake_read_handle), mock.patch.object(rw_handles, 'FileWriteHandle', side_effect=fake_write_handle), mock.patch.object(images, 'image_transfer'), mock.patch.object(images.IMAGE_API, 'get', return_value=image_data), mock.patch.object(images.IMAGE_API, 'download', return_value=read_iter), ) as (glance_read, http_write, image_transfer, image_show, image_download): images.fetch_image(context, instance, host, port, dc_name, ds_name, file_path) glance_read.assert_called_once_with(read_iter) http_write.assert_called_once_with(host, port, dc_name, ds_name, None, file_path, image_data['size']) image_transfer.assert_called_once_with(read_file_handle, write_file_handle) image_download.assert_called_once_with(context, instance['image_ref']) image_show.assert_called_once_with(context, instance['image_ref']) def _setup_mock_get_remote_image_service(self, mock_get_remote_image_service, metadata): mock_image_service = mock.MagicMock() mock_image_service.show.return_value = metadata mock_get_remote_image_service.return_value = [mock_image_service, 'i'] def test_get_vmdk_name_from_ovf(self): ovf_path = os.path.join(os.path.dirname(__file__), 'ovf.xml') with open(ovf_path) as f: ovf_descriptor = f.read() vmdk_name = images.get_vmdk_name_from_ovf(ovf_descriptor) self.assertEqual("Damn_Small_Linux-disk1.vmdk", vmdk_name) @mock.patch('oslo_vmware.rw_handles.ImageReadHandle') @mock.patch('oslo_vmware.rw_handles.VmdkWriteHandle') @mock.patch.object(tarfile, 'open') def test_fetch_image_ova(self, mock_tar_open, mock_write_class, mock_read_class): session = mock.MagicMock() ovf_descriptor = None ovf_path = os.path.join(os.path.dirname(__file__), 'ovf.xml') with open(ovf_path) as f: ovf_descriptor = f.read() with test.nested( mock.patch.object(images.IMAGE_API, 'get'), mock.patch.object(images.IMAGE_API, 'download'), mock.patch.object(images, 'image_transfer'), mock.patch.object(images, '_build_shadow_vm_config_spec'), 
mock.patch.object(session, '_call_method'), mock.patch.object(vm_util, 'get_vmdk_info') ) as (mock_image_api_get, mock_image_api_download, mock_image_transfer, mock_build_shadow_vm_config_spec, mock_call_method, mock_get_vmdk_info): image_data = {'id': 'fake-id', 'disk_format': 'vmdk', 'size': 512} instance = mock.MagicMock() instance.image_ref = image_data['id'] mock_image_api_get.return_value = image_data vm_folder_ref = mock.MagicMock() res_pool_ref = mock.MagicMock() context = mock.MagicMock() mock_read_handle = mock.MagicMock() mock_read_class.return_value = mock_read_handle mock_write_handle = mock.MagicMock() mock_write_class.return_value = mock_write_handle mock_write_handle.get_imported_vm.return_value = \ mock.sentinel.vm_ref mock_ovf = mock.MagicMock() mock_ovf.name = 'dsl.ovf' mock_vmdk = mock.MagicMock() mock_vmdk.name = "Damn_Small_Linux-disk1.vmdk" def fake_extract(name): if name == mock_ovf: m = mock.MagicMock() m.read.return_value = ovf_descriptor return m elif name == mock_vmdk: return mock_read_handle mock_tar = mock.MagicMock() mock_tar.__iter__ = mock.Mock(return_value = iter([mock_ovf, mock_vmdk])) mock_tar.extractfile = fake_extract mock_tar_open.return_value.__enter__.return_value = mock_tar images.fetch_image_ova( context, instance, session, 'fake-vm', 'fake-datastore', vm_folder_ref, res_pool_ref) mock_tar_open.assert_called_once_with(mode='r|', fileobj=mock_read_handle) mock_image_transfer.assert_called_once_with(mock_read_handle, mock_write_handle) mock_get_vmdk_info.assert_called_once_with( session, mock.sentinel.vm_ref, 'fake-vm') mock_call_method.assert_called_once_with( session.vim, "UnregisterVM", mock.sentinel.vm_ref) @mock.patch('oslo_vmware.rw_handles.ImageReadHandle') @mock.patch('oslo_vmware.rw_handles.VmdkWriteHandle') def test_fetch_image_stream_optimized(self, mock_write_class, mock_read_class): """Test fetching streamOptimized disk image.""" session = mock.MagicMock() with test.nested( mock.patch.object(images.IMAGE_API, 'get'), mock.patch.object(images.IMAGE_API, 'download'), mock.patch.object(images, 'image_transfer'), mock.patch.object(images, '_build_shadow_vm_config_spec'), mock.patch.object(session, '_call_method'), mock.patch.object(vm_util, 'get_vmdk_info') ) as (mock_image_api_get, mock_image_api_download, mock_image_transfer, mock_build_shadow_vm_config_spec, mock_call_method, mock_get_vmdk_info): image_data = {'id': 'fake-id', 'disk_format': 'vmdk', 'size': 512} instance = mock.MagicMock() instance.image_ref = image_data['id'] mock_image_api_get.return_value = image_data vm_folder_ref = mock.MagicMock() res_pool_ref = mock.MagicMock() context = mock.MagicMock() mock_read_handle = mock.MagicMock() mock_read_class.return_value = mock_read_handle mock_write_handle = mock.MagicMock() mock_write_class.return_value = mock_write_handle mock_write_handle.get_imported_vm.return_value = \ mock.sentinel.vm_ref images.fetch_image_stream_optimized( context, instance, session, 'fake-vm', 'fake-datastore', vm_folder_ref, res_pool_ref) mock_image_transfer.assert_called_once_with(mock_read_handle, mock_write_handle) mock_call_method.assert_called_once_with( session.vim, "UnregisterVM", mock.sentinel.vm_ref) mock_get_vmdk_info.assert_called_once_with( session, mock.sentinel.vm_ref, 'fake-vm') def test_from_image_with_image_ref(self): raw_disk_size_in_gb = 83 raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi image_id = nova.tests.unit.image.fake.get_valid_image_id() mdata = {'size': raw_disk_size_in_bytes, 'disk_format': 'vmdk', 'properties': { 
"vmware_ostype": constants.DEFAULT_OS_TYPE, "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE, "vmware_disktype": constants.DEFAULT_DISK_TYPE, "hw_vif_model": constants.DEFAULT_VIF_MODEL, "vmware_linked_clone": True}} mdata = objects.ImageMeta.from_dict(mdata) img_props = images.VMwareImage.from_image(image_id, mdata) image_size_in_kb = raw_disk_size_in_bytes / units.Ki # assert that defaults are set and no value returned is left empty self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type) self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, img_props.adapter_type) self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type) self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model) self.assertTrue(img_props.linked_clone) self.assertEqual(image_size_in_kb, img_props.file_size_in_kb) def _image_build(self, image_lc_setting, global_lc_setting, disk_format=constants.DEFAULT_DISK_FORMAT, os_type=constants.DEFAULT_OS_TYPE, adapter_type=constants.DEFAULT_ADAPTER_TYPE, disk_type=constants.DEFAULT_DISK_TYPE, vif_model=constants.DEFAULT_VIF_MODEL): self.flags(use_linked_clone=global_lc_setting, group='vmware') raw_disk_size_in_gb = 93 raw_disk_size_in_btyes = raw_disk_size_in_gb * units.Gi image_id = nova.tests.unit.image.fake.get_valid_image_id() mdata = {'size': raw_disk_size_in_btyes, 'disk_format': disk_format, 'properties': { "vmware_ostype": os_type, "vmware_adaptertype": adapter_type, "vmware_disktype": disk_type, "hw_vif_model": vif_model}} if image_lc_setting is not None: mdata['properties']["vmware_linked_clone"] = image_lc_setting mdata = objects.ImageMeta.from_dict(mdata) return images.VMwareImage.from_image(image_id, mdata) def test_use_linked_clone_override_nf(self): image_props = self._image_build(None, False) self.assertFalse(image_props.linked_clone, "No overrides present but still overridden!") def test_use_linked_clone_override_nt(self): image_props = self._image_build(None, True) self.assertTrue(image_props.linked_clone, "No overrides present but still overridden!") def test_use_linked_clone_override_ny(self): image_props = self._image_build(None, "yes") self.assertTrue(image_props.linked_clone, "No overrides present but still overridden!") def test_use_linked_clone_override_ft(self): image_props = self._image_build(False, True) self.assertFalse(image_props.linked_clone, "image level metadata failed to override global") def test_use_linked_clone_override_string_nt(self): image_props = self._image_build("no", True) self.assertFalse(image_props.linked_clone, "image level metadata failed to override global") def test_use_linked_clone_override_string_yf(self): image_props = self._image_build("yes", False) self.assertTrue(image_props.linked_clone, "image level metadata failed to override global") def test_use_disk_format_iso(self): image = self._image_build(None, True, disk_format='iso') self.assertEqual('iso', image.file_type) self.assertTrue(image.is_iso) def test_use_bad_disk_format(self): self.assertRaises(exception.InvalidDiskFormat, self._image_build, None, True, disk_format='bad_disk_format') def test_image_no_defaults(self): image = self._image_build(False, False, disk_format='iso', os_type='otherGuest', adapter_type='lsiLogic', disk_type='preallocated', vif_model='e1000e') self.assertEqual('iso', image.file_type) self.assertEqual('otherGuest', image.os_type) self.assertEqual('lsiLogic', image.adapter_type) self.assertEqual('preallocated', image.disk_type) self.assertEqual('e1000e', image.vif_model) self.assertFalse(image.linked_clone) def 
test_image_defaults(self): image = images.VMwareImage(image_id='fake-image-id') # N.B. We intentially don't use the defined constants here. Amongst # other potential failures, we're interested in changes to their # values, which would not otherwise be picked up. self.assertEqual('otherGuest', image.os_type) self.assertEqual('lsiLogic', image.adapter_type) self.assertEqual('preallocated', image.disk_type) self.assertEqual('e1000', image.vif_model) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_vmops.py0000664000567000056710000035607713064447152024554 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils from oslo_utils import units from oslo_utils import uuidutils from oslo_vmware import exceptions as vexc from oslo_vmware.objects import datastore as ds_obj from oslo_vmware import vim_util as vutil import six from nova.compute import power_state from nova import context from nova import exception from nova.network import model as network_model from nova import objects from nova import test from nova.tests.unit import fake_flavor from nova.tests.unit import fake_instance import nova.tests.unit.image.fake from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake from nova.tests.unit.virt.vmwareapi import stubs from nova.tests import uuidsentinel from nova import utils from nova import version from nova.virt import hardware from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import images from nova.virt.vmwareapi import vif from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import vmops class DsPathMatcher(object): def __init__(self, expected_ds_path_str): self.expected_ds_path_str = expected_ds_path_str def __eq__(self, ds_path_param): return str(ds_path_param) == self.expected_ds_path_str class VMwareVMOpsTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVMOpsTestCase, self).setUp() ds_util.dc_cache_reset() vmwareapi_fake.reset() stubs.set_stubs(self) self.flags(enabled=True, group='vnc') self.flags(image_cache_subdirectory_name='vmware_base', my_ip='', flat_injected=True) self._context = context.RequestContext('fake_user', 'fake_project') self._session = driver.VMwareAPISession() self._virtapi = mock.Mock() self._image_id = nova.tests.unit.image.fake.get_valid_image_id() fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds') self._ds = ds_obj.Datastore( ref=fake_ds_ref, name='fake_ds', capacity=10 * units.Gi, freespace=10 * units.Gi) self._dc_info = ds_util.DcInfo( ref='fake_dc_ref', name='fake_dc', vmFolder='fake_vm_folder') cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref) self._uuid = uuidsentinel.foo self._instance_values = { 'name': 'fake_name', 'display_name': 'fake_display_name', 'uuid': self._uuid, 'vcpus': 1, 'memory_mb': 512, 'image_ref': self._image_id, 'root_gb': 
10, 'node': '%s(%s)' % (cluster.mo_id, cluster.name), 'expected_attrs': ['system_metadata'], } self._instance = fake_instance.fake_instance_obj( self._context, **self._instance_values) self._flavor = objects.Flavor(name='m1.small', memory_mb=512, vcpus=1, root_gb=10, ephemeral_gb=0, swap=0, extra_specs={}) self._instance.flavor = self._flavor self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None, cluster=cluster.obj) self._cluster = cluster self._image_meta = objects.ImageMeta.from_dict({'id': self._image_id}) subnet_4 = network_model.Subnet(cidr='192.168.0.1/24', dns=[network_model.IP('192.168.0.1')], gateway= network_model.IP('192.168.0.1'), ips=[ network_model.IP('192.168.0.100')], routes=None) subnet_6 = network_model.Subnet(cidr='dead:beef::1/64', dns=None, gateway= network_model.IP('dead:beef::1'), ips=[network_model.IP( 'dead:beef::dcad:beff:feef:0')], routes=None) network = network_model.Network(id=0, bridge='fa0', label='fake', subnets=[subnet_4, subnet_6], vlan=None, bridge_interface=None, injected=True) self._network_values = { 'id': None, 'address': 'DE:AD:BE:EF:00:00', 'network': network, 'type': None, 'devname': None, 'ovs_interfaceid': None, 'rxtx_cap': 3 } self.network_info = network_model.NetworkInfo([ network_model.VIF(**self._network_values) ]) pure_IPv6_network = network_model.Network(id=0, bridge='fa0', label='fake', subnets=[subnet_6], vlan=None, bridge_interface=None, injected=True) self.pure_IPv6_network_info = network_model.NetworkInfo([ network_model.VIF(id=None, address='DE:AD:BE:EF:00:00', network=pure_IPv6_network, type=None, devname=None, ovs_interfaceid=None, rxtx_cap=3) ]) self._metadata = ( "name:fake_display_name\n" "userid:fake_user\n" "username:None\n" "projectid:fake_project\n" "projectname:None\n" "flavor:name:m1.micro\n" "flavor:memory_mb:6\n" "flavor:vcpus:28\n" "flavor:ephemeral_gb:8128\n" "flavor:root_gb:496\n" "flavor:swap:33550336\n" "imageid:70a599e0-31e7-49b7-b260-868f441e862b\n" "package:%s\n" % version.version_string_with_package()) def test_get_machine_id_str(self): result = vmops.VMwareVMOps._get_machine_id_str(self.network_info) self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;' '192.168.0.1;192.168.0.255;192.168.0.1#', result) result = vmops.VMwareVMOps._get_machine_id_str( self.pure_IPv6_network_info) self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result) def _setup_create_folder_mocks(self): ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock()) base_name = 'folder' ds_name = "datastore" ds_ref = mock.Mock() ds_ref.value = 1 dc_ref = mock.Mock() ds_util._DS_DC_MAPPING[ds_ref.value] = ds_util.DcInfo( ref=dc_ref, name='fake-name', vmFolder='fake-folder') path = ds_obj.DatastorePath(ds_name, base_name) return ds_name, ds_ref, ops, path, dc_ref @mock.patch.object(ds_util, 'mkdir') def test_create_folder_if_missing(self, mock_mkdir): ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks() ops._create_folder_if_missing(ds_name, ds_ref, 'folder') mock_mkdir.assert_called_with(ops._session, path, dc) @mock.patch.object(ds_util, 'mkdir') def test_create_folder_if_missing_exception(self, mock_mkdir): ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks() ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException() ops._create_folder_if_missing(ds_name, ds_ref, 'folder') mock_mkdir.assert_called_with(ops._session, path, dc) @mock.patch.object(vutil, 'continue_retrieval', return_value=None) def test_get_valid_vms_from_retrieve_result(self, _mock_cont): ops = vmops.VMwareVMOps(self._session, 
mock.Mock(), mock.Mock()) fake_objects = vmwareapi_fake.FakeRetrieveResult() for x in range(0, 3): vm = vmwareapi_fake.VirtualMachine() vm.set('config.extraConfig["nvp.vm-uuid"]', vmwareapi_fake.OptionValue( value=uuidutils.generate_uuid())) fake_objects.add_object(vm) vms = ops._get_valid_vms_from_retrieve_result(fake_objects) self.assertEqual(3, len(vms)) @mock.patch.object(vutil, 'continue_retrieval', return_value=None) def test_get_valid_vms_from_retrieve_result_with_invalid(self, _mock_cont): ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock()) fake_objects = vmwareapi_fake.FakeRetrieveResult() valid_vm = vmwareapi_fake.VirtualMachine() valid_vm.set('config.extraConfig["nvp.vm-uuid"]', vmwareapi_fake.OptionValue( value=uuidutils.generate_uuid())) fake_objects.add_object(valid_vm) invalid_vm1 = vmwareapi_fake.VirtualMachine() invalid_vm1.set('runtime.connectionState', 'orphaned') invalid_vm1.set('config.extraConfig["nvp.vm-uuid"]', vmwareapi_fake.OptionValue( value=uuidutils.generate_uuid())) invalid_vm2 = vmwareapi_fake.VirtualMachine() invalid_vm2.set('runtime.connectionState', 'inaccessible') invalid_vm2.set('config.extraConfig["nvp.vm-uuid"]', vmwareapi_fake.OptionValue( value=uuidutils.generate_uuid())) fake_objects.add_object(invalid_vm1) fake_objects.add_object(invalid_vm2) vms = ops._get_valid_vms_from_retrieve_result(fake_objects) self.assertEqual(1, len(vms)) def test_delete_vm_snapshot(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('RemoveSnapshot_Task', method) self.assertEqual('fake_vm_snapshot', args[0]) self.assertFalse(kwargs['removeChildren']) self.assertTrue(kwargs['consolidate']) return 'fake_remove_snapshot_task' with test.nested( mock.patch.object(self._session, '_wait_for_task'), mock.patch.object(self._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): self._vmops._delete_vm_snapshot(self._instance, "fake_vm_ref", "fake_vm_snapshot") _wait_for_task.assert_has_calls([ mock.call('fake_remove_snapshot_task')]) def test_create_vm_snapshot(self): method_list = ['CreateSnapshot_Task', 'get_object_property'] def fake_call_method(module, method, *args, **kwargs): expected_method = method_list.pop(0) self.assertEqual(expected_method, method) if (expected_method == 'CreateSnapshot_Task'): self.assertEqual('fake_vm_ref', args[0]) self.assertFalse(kwargs['memory']) self.assertTrue(kwargs['quiesce']) return 'fake_snapshot_task' elif (expected_method == 'get_object_property'): task_info = mock.Mock() task_info.result = "fake_snapshot_ref" self.assertEqual(('fake_snapshot_task', 'info'), args) return task_info with test.nested( mock.patch.object(self._session, '_wait_for_task'), mock.patch.object(self._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): snap = self._vmops._create_vm_snapshot(self._instance, "fake_vm_ref") self.assertEqual("fake_snapshot_ref", snap) _wait_for_task.assert_has_calls([ mock.call('fake_snapshot_task')]) def test_update_instance_progress(self): with mock.patch.object(self._instance, 'save') as mock_save: self._vmops._update_instance_progress(self._instance._context, self._instance, 5, 10) mock_save.assert_called_once_with() self.assertEqual(50, self._instance.progress) @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref') def test_get_info(self, mock_get_vm_ref): result = { 'summary.config.numCpu': 4, 'summary.config.memorySizeMB': 128, 'runtime.powerState': 'poweredOn' } def mock_call_method(module, method, *args, **kwargs): if method == 
'continue_retrieval': return return result with mock.patch.object(self._session, '_call_method', mock_call_method): info = self._vmops.get_info(self._instance) mock_get_vm_ref.assert_called_once_with(self._session, self._instance) expected = hardware.InstanceInfo(state=power_state.RUNNING, max_mem_kb=128 * 1024, mem_kb=128 * 1024, num_cpu=4) self.assertEqual(expected, info) @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref') def test_get_info_when_ds_unavailable(self, mock_get_vm_ref): result = { 'runtime.powerState': 'poweredOff' } def mock_call_method(module, method, *args, **kwargs): if method == 'continue_retrieval': return return result with mock.patch.object(self._session, '_call_method', mock_call_method): info = self._vmops.get_info(self._instance) mock_get_vm_ref.assert_called_once_with(self._session, self._instance) self.assertEqual(hardware.InstanceInfo(state=power_state.SHUTDOWN), info) @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref') def test_get_info_instance_deleted(self, mock_get_vm_ref): props = ['summary.config.numCpu', 'summary.config.memorySizeMB', 'runtime.powerState'] prop_cpu = vmwareapi_fake.Prop(props[0], 4) prop_mem = vmwareapi_fake.Prop(props[1], 128) prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn') prop_list = [prop_state, prop_mem, prop_cpu] obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list) result = vmwareapi_fake.FakeRetrieveResult() result.add_object(obj_content) def mock_call_method(module, method, *args, **kwargs): raise vexc.ManagedObjectNotFoundException() with mock.patch.object(self._session, '_call_method', mock_call_method): self.assertRaises(exception.InstanceNotFound, self._vmops.get_info, self._instance) mock_get_vm_ref.assert_called_once_with(self._session, self._instance) def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False): instance_ds_ref = mock.Mock() instance_ds_ref.value = "ds-1" _vcvmops = vmops.VMwareVMOps(self._session, None, None) if ds_ref_exists: ds_ref = mock.Mock() ds_ref.value = "ds-1" else: ds_ref = None self._continue_retrieval = True self._fake_object1 = vmwareapi_fake.FakeRetrieveResult() self._fake_object2 = vmwareapi_fake.FakeRetrieveResult() def fake_call_method(module, method, *args, **kwargs): self._fake_object1.add_object(vmwareapi_fake.Datacenter( ds_ref=ds_ref)) if not ds_ref: # Token is set for the fake_object1, so it will continue to # fetch the next object. 
setattr(self._fake_object1, 'token', 'token-0') if self._continue_retrieval: if self._continue_retrieval: self._continue_retrieval = False self._fake_object2.add_object( vmwareapi_fake.Datacenter()) return self._fake_object2 return if method == "continue_retrieval": return return self._fake_object1 with mock.patch.object(self._session, '_call_method', side_effect=fake_call_method) as fake_call: dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref) if ds_ref: self.assertEqual(1, len(ds_util._DS_DC_MAPPING)) calls = [mock.call(vim_util, "get_objects", "Datacenter", ["name", "datastore", "vmFolder"]), mock.call(vutil, 'continue_retrieval', self._fake_object1)] fake_call.assert_has_calls(calls) self.assertEqual("ha-datacenter", dc_info.name) else: calls = [mock.call(vim_util, "get_objects", "Datacenter", ["name", "datastore", "vmFolder"]), mock.call(vutil, 'continue_retrieval', self._fake_object2)] fake_call.assert_has_calls(calls) self.assertIsNone(dc_info) def test_get_datacenter_ref_and_name(self): self._test_get_datacenter_ref_and_name(ds_ref_exists=True) def test_get_datacenter_ref_and_name_with_no_datastore(self): self._test_get_datacenter_ref_and_name() @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing') @mock.patch.object(vm_util, 'power_off_instance') @mock.patch.object(ds_util, 'disk_copy') @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') @mock.patch.object(vm_util, 'find_rescue_device') @mock.patch.object(vm_util, 'get_vm_boot_spec') @mock.patch.object(vm_util, 'reconfigure_vm') @mock.patch.object(vm_util, 'power_on_instance') @mock.patch.object(ds_obj, 'get_datastore_by_ref') def test_rescue(self, mock_get_ds_by_ref, mock_power_on, mock_reconfigure, mock_get_boot_spec, mock_find_rescue, mock_get_vm_ref, mock_disk_copy, mock_power_off, mock_fetch_image_if_missing): _volumeops = mock.Mock() self._vmops._volumeops = _volumeops ds = ds_obj.Datastore('fake-ref', 'ds1') mock_get_ds_by_ref.return_value = ds mock_find_rescue.return_value = 'fake-rescue-device' mock_get_boot_spec.return_value = 'fake-boot-spec' vm_ref = vmwareapi_fake.ManagedObjectReference() mock_get_vm_ref.return_value = vm_ref device = vmwareapi_fake.DataObject() backing = vmwareapi_fake.DataObject() backing.datastore = ds.ref device.backing = backing vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk', 'fake-adapter', 'fake-disk', 'fake-capacity', device) with test.nested( mock.patch.object(self._vmops, 'get_datacenter_ref_and_name'), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk) ) as (_get_dc_ref_and_name, fake_vmdk_info): dc_info = mock.Mock() _get_dc_ref_and_name.return_value = dc_info self._vmops.rescue( self._context, self._instance, None, self._image_meta) mock_fetch_image_if_missing(self._context, mock.ANY) mock_power_off.assert_called_once_with(self._session, self._instance, vm_ref) uuid = self._instance.image_ref cache_path = ds.build_path('vmware_base', uuid, uuid + '.vmdk') rescue_path = ds.build_path(self._uuid, uuid + '-rescue.vmdk') mock_disk_copy.assert_called_once_with(self._session, dc_info.ref, cache_path, rescue_path) _volumeops.attach_disk_to_vm.assert_called_once_with(vm_ref, self._instance, mock.ANY, mock.ANY, rescue_path) mock_get_boot_spec.assert_called_once_with(mock.ANY, 'fake-rescue-device') mock_reconfigure.assert_called_once_with(self._session, vm_ref, 'fake-boot-spec') mock_power_on.assert_called_once_with(self._session, self._instance, vm_ref=vm_ref) def test_unrescue_power_on(self): self._test_unrescue(True) def 
test_unrescue_power_off(self): self._test_unrescue(False) def _test_unrescue(self, power_on): _volumeops = mock.Mock() self._vmops._volumeops = _volumeops vm_ref = mock.Mock() def fake_call_method(module, method, *args, **kwargs): expected_args = (vm_ref, 'config.hardware.device') self.assertEqual('get_object_property', method) self.assertEqual(expected_args, args) with test.nested( mock.patch.object(vm_util, 'power_on_instance'), mock.patch.object(vm_util, 'find_rescue_device'), mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref), mock.patch.object(self._session, '_call_method', fake_call_method), mock.patch.object(vm_util, 'power_off_instance') ) as (_power_on_instance, _find_rescue, _get_vm_ref, _call_method, _power_off): self._vmops.unrescue(self._instance, power_on=power_on) if power_on: _power_on_instance.assert_called_once_with(self._session, self._instance, vm_ref=vm_ref) else: self.assertFalse(_power_on_instance.called) _get_vm_ref.assert_called_once_with(self._session, self._instance) _power_off.assert_called_once_with(self._session, self._instance, vm_ref) _volumeops.detach_disk_from_vm.assert_called_once_with( vm_ref, self._instance, mock.ANY, destroy_disk=True) def _test_finish_migration(self, power_on=True, resize_instance=False): with test.nested( mock.patch.object(self._vmops, '_resize_create_ephemerals_and_swap'), mock.patch.object(self._vmops, "_update_instance_progress"), mock.patch.object(vm_util, "power_on_instance"), mock.patch.object(vm_util, "get_vm_ref", return_value='fake-ref') ) as (fake_resize_create_ephemerals_and_swap, fake_update_instance_progress, fake_power_on, fake_get_vm_ref): self._vmops.finish_migration(context=self._context, migration=None, instance=self._instance, disk_info=None, network_info=None, block_device_info=None, resize_instance=resize_instance, image_meta=None, power_on=power_on) fake_resize_create_ephemerals_and_swap.assert_called_once_with( 'fake-ref', self._instance, None) if power_on: fake_power_on.assert_called_once_with(self._session, self._instance, vm_ref='fake-ref') else: self.assertFalse(fake_power_on.called) calls = [ mock.call(self._context, self._instance, step=5, total_steps=vmops.RESIZE_TOTAL_STEPS), mock.call(self._context, self._instance, step=6, total_steps=vmops.RESIZE_TOTAL_STEPS)] fake_update_instance_progress.assert_has_calls(calls) def test_finish_migration_power_on(self): self._test_finish_migration(power_on=True, resize_instance=False) def test_finish_migration_power_off(self): self._test_finish_migration(power_on=False, resize_instance=False) def test_finish_migration_power_on_resize(self): self._test_finish_migration(power_on=True, resize_instance=True) @mock.patch.object(vmops.VMwareVMOps, '_create_swap') @mock.patch.object(vmops.VMwareVMOps, '_create_ephemeral') @mock.patch.object(ds_obj, 'get_datastore_by_ref', return_value='fake-ds-ref') @mock.patch.object(vm_util, 'get_vmdk_info') def _test_resize_create_ephemerals(self, vmdk, datastore, mock_get_vmdk_info, mock_get_datastore_by_ref, mock_create_ephemeral, mock_create_swap): mock_get_vmdk_info.return_value = vmdk dc_info = ds_util.DcInfo(ref='fake_ref', name='fake', vmFolder='fake_folder') with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name', return_value=dc_info) as mock_get_dc_ref_and_name: self._vmops._resize_create_ephemerals_and_swap( 'vm-ref', self._instance, 'block-devices') mock_get_vmdk_info.assert_called_once_with( self._session, 'vm-ref', uuid=self._instance.uuid) if vmdk.device: 
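# Ephemeral/swap disks are only recreated when the VM still has a root
# VMDK; with no root device the helper is expected to be a no-op, which
# the else branch below asserts.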
mock_get_datastore_by_ref.assert_called_once_with( self._session, datastore.ref) mock_get_dc_ref_and_name.assert_called_once_with(datastore.ref) mock_create_ephemeral.assert_called_once_with( 'block-devices', self._instance, 'vm-ref', dc_info, 'fake-ds-ref', 'uuid', 'fake-adapter') mock_create_swap.assert_called_once_with( 'block-devices', self._instance, 'vm-ref', dc_info, 'fake-ds-ref', 'uuid', 'fake-adapter') else: self.assertFalse(mock_create_ephemeral.called) self.assertFalse(mock_get_dc_ref_and_name.called) self.assertFalse(mock_get_datastore_by_ref.called) def test_resize_create_ephemerals(self): datastore = ds_obj.Datastore(ref='fake-ref', name='fake') device = vmwareapi_fake.DataObject() backing = vmwareapi_fake.DataObject() backing.datastore = datastore.ref device.backing = backing vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk', 'fake-adapter', 'fake-disk', 'fake-capacity', device) self._test_resize_create_ephemerals(vmdk, datastore) def test_resize_create_ephemerals_no_root(self): vmdk = vm_util.VmdkInfo(None, None, None, 0, None) self._test_resize_create_ephemerals(vmdk, None) @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs') @mock.patch.object(vmops.VMwareVMOps, '_resize_create_ephemerals_and_swap') @mock.patch.object(vmops.VMwareVMOps, '_remove_ephemerals_and_swap') @mock.patch.object(ds_util, 'disk_delete') @mock.patch.object(ds_util, 'disk_move') @mock.patch.object(ds_util, 'file_exists', return_value=True) @mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser', return_value='fake-browser') @mock.patch.object(vm_util, 'reconfigure_vm') @mock.patch.object(vm_util, 'get_vm_resize_spec', return_value='fake-spec') @mock.patch.object(vm_util, 'power_off_instance') @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') @mock.patch.object(vm_util, 'power_on_instance') def _test_finish_revert_migration(self, fake_power_on, fake_get_vm_ref, fake_power_off, fake_resize_spec, fake_reconfigure_vm, fake_get_browser, fake_original_exists, fake_disk_move, fake_disk_delete, fake_remove_ephemerals_and_swap, fake_resize_create_ephemerals_and_swap, fake_get_extra_specs, power_on): """Tests the finish_revert_migration method on vmops.""" datastore = ds_obj.Datastore(ref='fake-ref', name='fake') device = vmwareapi_fake.DataObject() backing = vmwareapi_fake.DataObject() backing.datastore = datastore.ref device.backing = backing vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk', 'fake-adapter', 'fake-disk', 'fake-capacity', device) dc_info = ds_util.DcInfo(ref='fake_ref', name='fake', vmFolder='fake_folder') extra_specs = vm_util.ExtraSpecs() fake_get_extra_specs.return_value = extra_specs with test.nested( mock.patch.object(self._vmops, 'get_datacenter_ref_and_name', return_value=dc_info), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk) ) as (fake_get_dc_ref_and_name, fake_get_vmdk_info): self._vmops._volumeops = mock.Mock() mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm self._vmops.finish_revert_migration(self._context, instance=self._instance, network_info=None, block_device_info=None, power_on=power_on) fake_get_vm_ref.assert_called_once_with(self._session, self._instance) fake_power_off.assert_called_once_with(self._session, self._instance, 'fake-ref') # Validate VM reconfiguration metadata = ('name:fake_display_name\n' 'userid:fake_user\n' 'username:None\n' 'projectid:fake_project\n' 'projectname:None\n' 'flavor:name:m1.small\n' 'flavor:memory_mb:512\n' 'flavor:vcpus:1\n' 
'flavor:ephemeral_gb:0\n' 'flavor:root_gb:10\n' 'flavor:swap:0\n' 'imageid:70a599e0-31e7-49b7-b260-868f441e862b\n' 'package:%s\n' % version.version_string_with_package()) fake_resize_spec.assert_called_once_with( self._session.vim.client.factory, int(self._instance.vcpus), int(self._instance.memory_mb), extra_specs, metadata=metadata) fake_reconfigure_vm.assert_called_once_with(self._session, 'fake-ref', 'fake-spec') # Validate disk configuration fake_get_vmdk_info.assert_called_once_with( self._session, 'fake-ref', uuid=self._instance.uuid) fake_get_browser.assert_called_once_with('fake-ref') fake_original_exists.assert_called_once_with( self._session, 'fake-browser', ds_obj.DatastorePath(datastore.name, 'uuid'), 'original.vmdk') mock_detach_disk.assert_called_once_with('fake-ref', self._instance, device) fake_disk_delete.assert_called_once_with( self._session, dc_info.ref, '[fake] uuid/root.vmdk') fake_disk_move.assert_called_once_with( self._session, dc_info.ref, '[fake] uuid/original.vmdk', '[fake] uuid/root.vmdk') mock_attach_disk.assert_called_once_with( 'fake-ref', self._instance, 'fake-adapter', 'fake-disk', '[fake] uuid/root.vmdk') fake_remove_ephemerals_and_swap.assert_called_once_with('fake-ref') fake_resize_create_ephemerals_and_swap.assert_called_once_with( 'fake-ref', self._instance, None) if power_on: fake_power_on.assert_called_once_with(self._session, self._instance) else: self.assertFalse(fake_power_on.called) def test_finish_revert_migration_power_on(self): self._test_finish_revert_migration(power_on=True) def test_finish_revert_migration_power_off(self): self._test_finish_revert_migration(power_on=False) @mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata') @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs') @mock.patch.object(vm_util, 'reconfigure_vm') @mock.patch.object(vm_util, 'get_vm_resize_spec', return_value='fake-spec') def test_resize_vm(self, fake_resize_spec, fake_reconfigure, fake_get_extra_specs, fake_get_metadata): extra_specs = vm_util.ExtraSpecs() fake_get_extra_specs.return_value = extra_specs fake_get_metadata.return_value = self._metadata flavor = objects.Flavor(name='m1.small', memory_mb=1024, vcpus=2, extra_specs={}) self._vmops._resize_vm(self._context, self._instance, 'vm-ref', flavor, None) fake_resize_spec.assert_called_once_with( self._session.vim.client.factory, 2, 1024, extra_specs, metadata=self._metadata) fake_reconfigure.assert_called_once_with(self._session, 'vm-ref', 'fake-spec') @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk') @mock.patch.object(ds_util, 'disk_move') @mock.patch.object(ds_util, 'disk_copy') def test_resize_disk(self, fake_disk_copy, fake_disk_move, fake_extend): datastore = ds_obj.Datastore(ref='fake-ref', name='fake') device = vmwareapi_fake.DataObject() backing = vmwareapi_fake.DataObject() backing.datastore = datastore.ref device.backing = backing vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk', 'fake-adapter', 'fake-disk', self._instance.root_gb * units.Gi, device) dc_info = ds_util.DcInfo(ref='fake_ref', name='fake', vmFolder='fake_folder') with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name', return_value=dc_info) as fake_get_dc_ref_and_name: self._vmops._volumeops = mock.Mock() mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm flavor = fake_flavor.fake_flavor_obj(self._context, root_gb=self._instance.root_gb + 1) self._vmops._resize_disk(self._instance, 'fake-ref', vmdk, flavor) 
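# Expected disk-resize flow, as asserted below: copy root.vmdk to
# resized.vmdk, detach the old root disk, extend the copy to the new
# flavor size, move root.vmdk aside as original.vmdk, promote
# resized.vmdk back to root.vmdk and re-attach it.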
fake_get_dc_ref_and_name.assert_called_once_with(datastore.ref) fake_disk_copy.assert_called_once_with( self._session, dc_info.ref, '[fake] uuid/root.vmdk', '[fake] uuid/resized.vmdk') mock_detach_disk.assert_called_once_with('fake-ref', self._instance, device) fake_extend.assert_called_once_with( self._instance, flavor['root_gb'] * units.Mi, '[fake] uuid/resized.vmdk', dc_info.ref) calls = [ mock.call(self._session, dc_info.ref, '[fake] uuid/root.vmdk', '[fake] uuid/original.vmdk'), mock.call(self._session, dc_info.ref, '[fake] uuid/resized.vmdk', '[fake] uuid/root.vmdk')] fake_disk_move.assert_has_calls(calls) mock_attach_disk.assert_called_once_with( 'fake-ref', self._instance, 'fake-adapter', 'fake-disk', '[fake] uuid/root.vmdk') @mock.patch.object(vm_util, 'detach_devices_from_vm') @mock.patch.object(vm_util, 'get_swap') @mock.patch.object(vm_util, 'get_ephemerals') def test_remove_ephemerals_and_swap(self, get_ephemerals, get_swap, detach_devices): get_ephemerals.return_value = [mock.sentinel.ephemeral0, mock.sentinel.ephemeral1] get_swap.return_value = mock.sentinel.swap devices = [mock.sentinel.ephemeral0, mock.sentinel.ephemeral1, mock.sentinel.swap] self._vmops._remove_ephemerals_and_swap(mock.sentinel.vm_ref) detach_devices.assert_called_once_with(self._vmops._session, mock.sentinel.vm_ref, devices) @mock.patch.object(ds_util, 'disk_delete') @mock.patch.object(ds_util, 'file_exists', return_value=True) @mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser', return_value='fake-browser') @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') def test_confirm_migration(self, fake_get_vm_ref, fake_get_browser, fake_original_exists, fake_disk_delete): """Tests the confirm_migration method on vmops.""" datastore = ds_obj.Datastore(ref='fake-ref', name='fake') device = vmwareapi_fake.DataObject() backing = vmwareapi_fake.DataObject() backing.datastore = datastore.ref device.backing = backing vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk', 'fake-adapter', 'fake-disk', 'fake-capacity', device) dc_info = ds_util.DcInfo(ref='fake_ref', name='fake', vmFolder='fake_folder') with test.nested( mock.patch.object(self._vmops, 'get_datacenter_ref_and_name', return_value=dc_info), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk) ) as (fake_get_dc_ref_and_name, fake_get_vmdk_info): self._vmops.confirm_migration(None, self._instance, None) fake_get_vm_ref.assert_called_once_with(self._session, self._instance) fake_get_vmdk_info.assert_called_once_with( self._session, 'fake-ref', uuid=self._instance.uuid) fake_get_browser.assert_called_once_with('fake-ref') fake_original_exists.assert_called_once_with( self._session, 'fake-browser', ds_obj.DatastorePath(datastore.name, 'uuid'), 'original.vmdk') fake_disk_delete.assert_called_once_with( self._session, dc_info.ref, '[fake] uuid/original.vmdk') def test_migrate_disk_and_power_off(self): self._test_migrate_disk_and_power_off( flavor_root_gb=self._instance.root_gb + 1) def test_migrate_disk_and_power_off_zero_disk_flavor(self): self._instance.root_gb = 0 self._test_migrate_disk_and_power_off(flavor_root_gb=0) def test_migrate_disk_and_power_off_disk_shrink(self): self.assertRaises(exception.InstanceFaultRollback, self._test_migrate_disk_and_power_off, flavor_root_gb=self._instance.root_gb - 1) @mock.patch.object(vmops.VMwareVMOps, "_remove_ephemerals_and_swap") @mock.patch.object(vm_util, 'get_vmdk_info') @mock.patch.object(vmops.VMwareVMOps, "_resize_disk") @mock.patch.object(vmops.VMwareVMOps, "_resize_vm") 
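# Reminder: mock.patch decorators are applied bottom-up, so the generated
# mocks reach the test method in reverse decorator order. Illustrative
# sketch (not part of this test):
#
#     @mock.patch.object(mod, 'second')   # injected as the 2nd argument
#     @mock.patch.object(mod, 'first')    # injected as the 1st argument
#     def test_something(self, mock_first, mock_second):
#         ...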
@mock.patch.object(vm_util, 'power_off_instance') @mock.patch.object(vmops.VMwareVMOps, "_update_instance_progress") @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') def _test_migrate_disk_and_power_off(self, fake_get_vm_ref, fake_progress, fake_power_off, fake_resize_vm, fake_resize_disk, fake_get_vmdk_info, fake_remove_ephemerals_and_swap, flavor_root_gb): vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk', 'fake-adapter', 'fake-disk', self._instance.root_gb * units.Gi, 'fake-device') fake_get_vmdk_info.return_value = vmdk flavor = fake_flavor.fake_flavor_obj(self._context, root_gb=flavor_root_gb) self._vmops.migrate_disk_and_power_off(self._context, self._instance, None, flavor) fake_get_vm_ref.assert_called_once_with(self._session, self._instance) fake_power_off.assert_called_once_with(self._session, self._instance, 'fake-ref') fake_resize_vm.assert_called_once_with(self._context, self._instance, 'fake-ref', flavor, mock.ANY) fake_resize_disk.assert_called_once_with(self._instance, 'fake-ref', vmdk, flavor) calls = [mock.call(self._context, self._instance, step=i, total_steps=vmops.RESIZE_TOTAL_STEPS) for i in range(4)] fake_progress.assert_has_calls(calls) @mock.patch.object(vutil, 'get_inventory_path', return_value='fake_path') @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm') @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive') def test_configure_config_drive(self, mock_create_config_drive, mock_attach_cdrom_to_vm, mock_get_inventory_path): injected_files = mock.Mock() admin_password = mock.Mock() network_info = mock.Mock() vm_ref = mock.Mock() mock_create_config_drive.return_value = "fake_iso_path" self._vmops._configure_config_drive( self._instance, vm_ref, self._dc_info, self._ds, injected_files, admin_password, network_info) upload_iso_path = self._ds.build_path("fake_iso_path") mock_get_inventory_path.assert_called_once_with(self._session.vim, self._dc_info.ref) mock_create_config_drive.assert_called_once_with(self._instance, injected_files, admin_password, network_info, self._ds.name, 'fake_path', self._instance.uuid, "Fake-CookieJar") mock_attach_cdrom_to_vm.assert_called_once_with( vm_ref, self._instance, self._ds.ref, str(upload_iso_path)) @mock.patch.object(vmops.LOG, 'debug') @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing') @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info') @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine') @mock.patch.object(vmops.lockutils, 'lock') def test_spawn_mask_block_device_info_password(self, mock_lock, mock_build_virtual_machine, mock_get_vm_config_info, mock_fetch_image_if_missing, mock_debug): # Very simple test that just ensures block_device_info auth_password # is masked when logged; the rest of the test just fails out early. data = {'auth_password': 'scrubme'} bdm = [{'boot_index': 0, 'disk_bus': constants.DEFAULT_ADAPTER_TYPE, 'connection_info': {'data': data}}] bdi = {'block_device_mapping': bdm} self.password_logged = False # Tests that the parameters to the to_xml method are sanitized for # passwords when logged. def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: self.password_logged = True self.assertNotIn('scrubme', args[0]) mock_debug.side_effect = fake_debug self.flags(flat_injected=False) self.flags(enabled=False, group='vnc') # Call spawn(). We don't care what it does as long as it generates # the log message, which we check below. 
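# attach_root_volume is made to raise TestingException so spawn() aborts
# right after block_device_info has been logged; the test only cares that
# the password was scrubbed from that log line.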
with mock.patch.object(self._vmops, '_volumeops') as mock_vo: mock_vo.attach_root_volume.side_effect = test.TestingException try: self._vmops.spawn( self._context, self._instance, self._image_meta, injected_files=None, admin_password=None, network_info=[], block_device_info=bdi ) except test.TestingException: pass # Check that the relevant log message was generated, and therefore # that we checked it was scrubbed self.assertTrue(self.password_logged) def _get_metadata(self, is_image_used=True): if is_image_used: image_id = '70a599e0-31e7-49b7-b260-868f441e862b' else: image_id = None return ("name:fake_display_name\n" "userid:fake_user\n" "username:None\n" "projectid:fake_project\n" "projectname:None\n" "flavor:name:m1.small\n" "flavor:memory_mb:512\n" "flavor:vcpus:1\n" "flavor:ephemeral_gb:0\n" "flavor:root_gb:10\n" "flavor:swap:0\n" "imageid:%(image_id)s\n" "package:%(version)s\n" % { 'image_id': image_id, 'version': version.version_string_with_package()}) @mock.patch.object(vm_util, 'rename_vm') @mock.patch.object(vmops.VMwareVMOps, '_create_folders', return_value='fake_vm_folder') @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance') @mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone') @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing') @mock.patch( 'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image') @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine') @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info') @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs') @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_non_root_block_device(self, from_image, get_extra_specs, get_vm_config_info, build_virtual_machine, enlist_image, fetch_image, use_disk_image, power_on_instance, create_folders, rename_vm): self._instance.flavor = self._flavor extra_specs = get_extra_specs.return_value connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'} connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'} bdm = [{'connection_info': connection_info1, 'disk_bus': constants.ADAPTER_TYPE_IDE, 'mount_device': '/dev/sdb'}, {'connection_info': connection_info2, 'disk_bus': constants.DEFAULT_ADAPTER_TYPE, 'mount_device': '/dev/sdc'}] bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'} self.flags(flat_injected=False) self.flags(enabled=False, group='vnc') image_size = (self._instance.root_gb) * units.Gi / 2 image_info = images.VMwareImage( image_id=self._image_id, file_size=image_size) vi = get_vm_config_info.return_value from_image.return_value = image_info build_virtual_machine.return_value = 'fake-vm-ref' with mock.patch.object(self._vmops, '_volumeops') as volumeops: self._vmops.spawn(self._context, self._instance, self._image_meta, injected_files=None, admin_password=None, network_info=[], block_device_info=bdi) from_image.assert_called_once_with(self._instance.image_ref, self._image_meta) get_vm_config_info.assert_called_once_with(self._instance, image_info, extra_specs) build_virtual_machine.assert_called_once_with(self._instance, image_info, vi.dc_info, vi.datastore, [], extra_specs, self._get_metadata()) enlist_image.assert_called_once_with(image_info.image_id, vi.datastore, vi.dc_info.ref) fetch_image.assert_called_once_with(self._context, vi) use_disk_image.assert_called_once_with('fake-vm-ref', vi) volumeops.attach_volume.assert_any_call( connection_info1, self._instance, constants.ADAPTER_TYPE_IDE) volumeops.attach_volume.assert_any_call( 
connection_info2, self._instance, constants.DEFAULT_ADAPTER_TYPE) @mock.patch.object(vm_util, 'rename_vm') @mock.patch.object(vmops.VMwareVMOps, '_create_folders', return_value='fake_vm_folder') @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance') @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine') @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info') @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs') @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_with_no_image_and_block_devices(self, from_image, get_extra_specs, get_vm_config_info, build_virtual_machine, power_on_instance, create_folders, rename_vm): self._instance.image_ref = None self._instance.flavor = self._flavor extra_specs = get_extra_specs.return_value connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'} connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'} connection_info3 = {'data': 'fake-data3', 'serial': 'volume-fake-id3'} bdm = [{'boot_index': 0, 'connection_info': connection_info1, 'disk_bus': constants.ADAPTER_TYPE_IDE}, {'boot_index': 1, 'connection_info': connection_info2, 'disk_bus': constants.DEFAULT_ADAPTER_TYPE}, {'boot_index': 2, 'connection_info': connection_info3, 'disk_bus': constants.ADAPTER_TYPE_LSILOGICSAS}] bdi = {'block_device_mapping': bdm} self.flags(flat_injected=False) self.flags(enabled=False, group='vnc') image_info = mock.sentinel.image_info vi = get_vm_config_info.return_value from_image.return_value = image_info build_virtual_machine.return_value = 'fake-vm-ref' with mock.patch.object(self._vmops, '_volumeops') as volumeops: self._vmops.spawn(self._context, self._instance, self._image_meta, injected_files=None, admin_password=None, network_info=[], block_device_info=bdi) from_image.assert_called_once_with(self._instance.image_ref, self._image_meta) get_vm_config_info.assert_called_once_with(self._instance, image_info, extra_specs) build_virtual_machine.assert_called_once_with(self._instance, image_info, vi.dc_info, vi.datastore, [], extra_specs, self._get_metadata(is_image_used=False)) volumeops.attach_root_volume.assert_called_once_with( connection_info1, self._instance, vi.datastore.ref, constants.ADAPTER_TYPE_IDE) volumeops.attach_volume.assert_any_call( connection_info2, self._instance, constants.DEFAULT_ADAPTER_TYPE) volumeops.attach_volume.assert_any_call( connection_info3, self._instance, constants.ADAPTER_TYPE_LSILOGICSAS) @mock.patch.object(vmops.VMwareVMOps, '_create_folders', return_value='fake_vm_folder') @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance') @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine') @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info') @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs') @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_unsupported_hardware(self, from_image, get_extra_specs, get_vm_config_info, build_virtual_machine, power_on_instance, create_folders): self._instance.image_ref = None self._instance.flavor = self._flavor extra_specs = get_extra_specs.return_value connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'} bdm = [{'boot_index': 0, 'connection_info': connection_info, 'disk_bus': 'invalid_adapter_type'}] bdi = {'block_device_mapping': bdm} self.flags(flat_injected=False) self.flags(enabled=False, group='vnc') image_info = mock.sentinel.image_info vi = get_vm_config_info.return_value from_image.return_value = image_info 
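# The boot device requests 'invalid_adapter_type' as its disk bus, so
# spawn() is expected to fail with UnsupportedHardware.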
build_virtual_machine.return_value = 'fake-vm-ref' self.assertRaises(exception.UnsupportedHardware, self._vmops.spawn, self._context, self._instance, self._image_meta, injected_files=None, admin_password=None, network_info=[], block_device_info=bdi) from_image.assert_called_once_with(self._instance.image_ref, self._image_meta) get_vm_config_info.assert_called_once_with( self._instance, image_info, extra_specs) build_virtual_machine.assert_called_once_with(self._instance, image_info, vi.dc_info, vi.datastore, [], extra_specs, self._get_metadata(is_image_used=False)) def test_get_ds_browser(self): cache = self._vmops._datastore_browser_mapping ds_browser = mock.Mock() moref = vmwareapi_fake.ManagedObjectReference('datastore-100') self.assertIsNone(cache.get(moref.value)) mock_call_method = mock.Mock(return_value=ds_browser) with mock.patch.object(self._session, '_call_method', mock_call_method): ret = self._vmops._get_ds_browser(moref) mock_call_method.assert_called_once_with(vutil, 'get_object_property', moref, 'browser') self.assertIs(ds_browser, ret) self.assertIs(ds_browser, cache.get(moref.value)) @mock.patch.object( vmops.VMwareVMOps, '_sized_image_exists', return_value=False) @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk') @mock.patch.object(vm_util, 'copy_virtual_disk') def _test_use_disk_image_as_linked_clone(self, mock_copy_virtual_disk, mock_extend_virtual_disk, mock_sized_image_exists, flavor_fits_image=False): extra_specs = vm_util.ExtraSpecs() file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi image_info = images.VMwareImage( image_id=self._image_id, file_size=file_size, linked_clone=False) cache_root_folder = self._ds.build_path("vmware_base", self._image_id) mock_imagecache = mock.Mock() mock_imagecache.get_image_cache_folder.return_value = cache_root_folder vi = vmops.VirtualMachineInstanceConfigInfo( self._instance, image_info, self._ds, self._dc_info, mock_imagecache, extra_specs) sized_cached_image_ds_loc = cache_root_folder.join( "%s.%s.vmdk" % (self._image_id, vi.root_gb)) self._vmops._volumeops = mock.Mock() mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi) mock_copy_virtual_disk.assert_called_once_with( self._session, self._dc_info.ref, str(vi.cache_image_path), str(sized_cached_image_ds_loc)) if not flavor_fits_image: mock_extend_virtual_disk.assert_called_once_with( self._instance, vi.root_gb * units.Mi, str(sized_cached_image_ds_loc), self._dc_info.ref) mock_attach_disk_to_vm.assert_called_once_with( "fake_vm_ref", self._instance, vi.ii.adapter_type, vi.ii.disk_type, str(sized_cached_image_ds_loc), vi.root_gb * units.Mi, False, disk_io_limits=vi._extra_specs.disk_io_limits) def test_use_disk_image_as_linked_clone(self): self._test_use_disk_image_as_linked_clone() def test_use_disk_image_as_linked_clone_flavor_fits_image(self): self._test_use_disk_image_as_linked_clone(flavor_fits_image=True) @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk') @mock.patch.object(vm_util, 'copy_virtual_disk') def _test_use_disk_image_as_full_clone(self, mock_copy_virtual_disk, mock_extend_virtual_disk, flavor_fits_image=False): extra_specs = vm_util.ExtraSpecs() file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi image_info = images.VMwareImage( image_id=self._image_id, file_size=file_size, linked_clone=False) cache_root_folder = self._ds.build_path("vmware_base", self._image_id) mock_imagecache = mock.Mock() 
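# Full-clone path: the cached image is copied straight into the instance
# folder and extended when the flavor's root disk is larger than the
# image, then attached as the root disk.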
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder vi = vmops.VirtualMachineInstanceConfigInfo( self._instance, image_info, self._ds, self._dc_info, mock_imagecache, extra_specs) self._vmops._volumeops = mock.Mock() mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi) fake_path = '[fake_ds] %(uuid)s/%(uuid)s.vmdk' % {'uuid': self._uuid} mock_copy_virtual_disk.assert_called_once_with( self._session, self._dc_info.ref, str(vi.cache_image_path), fake_path) if not flavor_fits_image: mock_extend_virtual_disk.assert_called_once_with( self._instance, vi.root_gb * units.Mi, fake_path, self._dc_info.ref) mock_attach_disk_to_vm.assert_called_once_with( "fake_vm_ref", self._instance, vi.ii.adapter_type, vi.ii.disk_type, fake_path, vi.root_gb * units.Mi, False, disk_io_limits=vi._extra_specs.disk_io_limits) def test_use_disk_image_as_full_clone(self): self._test_use_disk_image_as_full_clone() def test_use_disk_image_as_full_clone_image_too_big(self): self._test_use_disk_image_as_full_clone(flavor_fits_image=True) @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm') @mock.patch.object(vm_util, 'create_virtual_disk') def _test_use_iso_image(self, mock_create_virtual_disk, mock_attach_cdrom, with_root_disk): extra_specs = vm_util.ExtraSpecs() image_info = images.VMwareImage( image_id=self._image_id, file_size=10 * units.Mi, linked_clone=True) cache_root_folder = self._ds.build_path("vmware_base", self._image_id) mock_imagecache = mock.Mock() mock_imagecache.get_image_cache_folder.return_value = cache_root_folder vi = vmops.VirtualMachineInstanceConfigInfo( self._instance, image_info, self._ds, self._dc_info, mock_imagecache, extra_specs) self._vmops._volumeops = mock.Mock() mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm self._vmops._use_iso_image("fake_vm_ref", vi) mock_attach_cdrom.assert_called_once_with( "fake_vm_ref", self._instance, self._ds.ref, str(vi.cache_image_path)) fake_path = '[fake_ds] %(uuid)s/%(uuid)s.vmdk' % {'uuid': self._uuid} if with_root_disk: mock_create_virtual_disk.assert_called_once_with( self._session, self._dc_info.ref, vi.ii.adapter_type, vi.ii.disk_type, fake_path, vi.root_gb * units.Mi) linked_clone = False mock_attach_disk_to_vm.assert_called_once_with( "fake_vm_ref", self._instance, vi.ii.adapter_type, vi.ii.disk_type, fake_path, vi.root_gb * units.Mi, linked_clone, disk_io_limits=vi._extra_specs.disk_io_limits) def test_use_iso_image_with_root_disk(self): self._test_use_iso_image(with_root_disk=True) def test_use_iso_image_without_root_disk(self): self._test_use_iso_image(with_root_disk=False) def _verify_spawn_method_calls(self, mock_call_method, extras=None): # TODO(vui): More explicit assertions of spawn() behavior # are waiting on additional refactoring pertaining to image # handling/manipulation. Till then, we continue to assert on the # sequence of VIM operations invoked. 
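# The sequence below mirrors the VIM calls made while preparing a flat
# image (datastore searches, create/delete/move of datastore files, disk
# extend) and always ends with the Rename_Task that gives the VM its
# final name.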
expected_methods = ['get_object_property', 'SearchDatastore_Task', 'CreateVirtualDisk_Task', 'DeleteDatastoreFile_Task', 'MoveDatastoreFile_Task', 'DeleteDatastoreFile_Task', 'SearchDatastore_Task', 'ExtendVirtualDisk_Task', ] if extras: expected_methods.extend(extras) # Last call should be renaming the instance expected_methods.append('Rename_Task') recorded_methods = [c[1][1] for c in mock_call_method.mock_calls] self.assertEqual(expected_methods, recorded_methods) @mock.patch.object(vmops.VMwareVMOps, '_create_folders', return_value='fake_vm_folder') @mock.patch( 'nova.virt.vmwareapi.vmops.VMwareVMOps._update_vnic_index') @mock.patch( 'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive') @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore') @mock.patch( 'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name') @mock.patch('nova.virt.vmwareapi.vif.get_vif_info', return_value=[]) @mock.patch('nova.utils.is_neutron', return_value=False) @mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec', return_value='fake_create_spec') @mock.patch('nova.virt.vmwareapi.vm_util.create_vm', return_value='fake_vm_ref') @mock.patch('nova.virt.vmwareapi.ds_util.mkdir') @mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id') @mock.patch( 'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image') @mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config') @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance') @mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk') # TODO(dims): Need to add tests for create_virtual_disk after the # disk/image code in spawn gets refactored def _test_spawn(self, mock_copy_virtual_disk, mock_power_on_instance, mock_get_and_set_vnc_config, mock_enlist_image, mock_set_machine_id, mock_mkdir, mock_create_vm, mock_get_create_spec, mock_is_neutron, mock_get_vif_info, mock_get_datacenter_ref_and_name, mock_get_datastore, mock_configure_config_drive, mock_update_vnic_index, mock_create_folders, block_device_info=None, extra_specs=None, config_drive=False): if extra_specs is None: extra_specs = vm_util.ExtraSpecs() image_size = (self._instance.root_gb) * units.Gi / 2 image = { 'id': self._image_id, 'disk_format': 'vmdk', 'size': image_size, } image = objects.ImageMeta.from_dict(image) image_info = images.VMwareImage( image_id=self._image_id, file_size=image_size) vi = self._vmops._get_vm_config_info( self._instance, image_info, extra_specs) self._vmops._volumeops = mock.Mock() network_info = mock.Mock() mock_get_datastore.return_value = self._ds mock_get_datacenter_ref_and_name.return_value = self._dc_info mock_call_method = mock.Mock(return_value='fake_task') if extra_specs is None: extra_specs = vm_util.ExtraSpecs() with test.nested( mock.patch.object(self._session, '_wait_for_task'), mock.patch.object(self._session, '_call_method', mock_call_method), mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid'), mock.patch.object(images, 'fetch_image'), mock.patch.object(self._vmops, '_get_extra_specs', return_value=extra_specs), mock.patch.object(self._vmops, '_get_instance_metadata', return_value='fake-metadata') ) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image, _get_extra_specs, _get_instance_metadata): self._vmops.spawn(self._context, self._instance, image, injected_files='fake_files', admin_password='password', network_info=network_info, block_device_info=block_device_info) mock_is_neutron.assert_called_once_with() self.assertEqual(2, mock_mkdir.call_count) 
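# The assertions below walk through spawn()'s expected collaborators in
# order: VIF lookup, create-spec generation, VM creation, VNC and
# machine-id configuration, power-on, volume attachment, image caching
# and the image fetch itself.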
mock_get_vif_info.assert_called_once_with( self._session, self._cluster.obj, False, constants.DEFAULT_VIF_MODEL, network_info) mock_get_create_spec.assert_called_once_with( self._session.vim.client.factory, self._instance, 'fake_ds', [], extra_specs, constants.DEFAULT_OS_TYPE, profile_spec=None, metadata='fake-metadata') mock_create_vm.assert_called_once_with( self._session, self._instance, 'fake_vm_folder', 'fake_create_spec', self._cluster.resourcePool) mock_get_and_set_vnc_config.assert_called_once_with( self._session.vim.client.factory, self._instance, 'fake_vm_ref') mock_set_machine_id.assert_called_once_with( self._session.vim.client.factory, self._instance, network_info, vm_ref='fake_vm_ref') mock_power_on_instance.assert_called_once_with( self._session, self._instance, vm_ref='fake_vm_ref') if (block_device_info and 'block_device_mapping' in block_device_info): bdms = block_device_info['block_device_mapping'] for bdm in bdms: mock_attach_root = ( self._vmops._volumeops.attach_root_volume) mock_attach = self._vmops._volumeops.attach_volume adapter_type = bdm.get('disk_bus') or vi.ii.adapter_type if bdm.get('boot_index') == 0: mock_attach_root.assert_any_call( bdm['connection_info'], self._instance, self._ds.ref, adapter_type) else: mock_attach.assert_any_call( bdm['connection_info'], self._instance, self._ds.ref, adapter_type) mock_enlist_image.assert_called_once_with( self._image_id, self._ds, self._dc_info.ref) upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % ( self._image_id, self._image_id) _fetch_image.assert_called_once_with( self._context, self._instance, self._session._host, self._session._port, self._dc_info.name, self._ds.name, upload_file_name, cookies='Fake-CookieJar') self.assertTrue(len(_wait_for_task.mock_calls) > 0) extras = None if block_device_info and ('ephemerals' in block_device_info or 'swap' in block_device_info): extras = ['CreateVirtualDisk_Task'] self._verify_spawn_method_calls(_call_method, extras) dc_ref = 'fake_dc_ref' source_file = six.text_type('[fake_ds] vmware_base/%s/%s.vmdk' % (self._image_id, self._image_id)) dest_file = six.text_type('[fake_ds] vmware_base/%s/%s.%d.vmdk' % (self._image_id, self._image_id, self._instance['root_gb'])) # TODO(dims): add more tests for copy_virtual_disk after # the disk/image code in spawn gets refactored mock_copy_virtual_disk.assert_called_with(self._session, dc_ref, source_file, dest_file) if config_drive: mock_configure_config_drive.assert_called_once_with( self._instance, 'fake_vm_ref', self._dc_info, self._ds, 'fake_files', 'password', network_info) mock_update_vnic_index.assert_called_once_with( self._context, self._instance, network_info) @mock.patch.object(ds_util, 'get_datastore') @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name') def _test_get_spawn_vm_config_info(self, mock_get_datacenter_ref_and_name, mock_get_datastore, image_size_bytes=0): image_info = images.VMwareImage( image_id=self._image_id, file_size=image_size_bytes, linked_clone=True) mock_get_datastore.return_value = self._ds mock_get_datacenter_ref_and_name.return_value = self._dc_info extra_specs = vm_util.ExtraSpecs() vi = self._vmops._get_vm_config_info(self._instance, image_info, extra_specs) self.assertEqual(image_info, vi.ii) self.assertEqual(self._ds, vi.datastore) self.assertEqual(self._instance.root_gb, vi.root_gb) self.assertEqual(self._instance, vi.instance) self.assertEqual(self._instance.uuid, vi.instance.uuid) self.assertEqual(extra_specs, vi._extra_specs) cache_image_path = '[%s] 
vmware_base/%s/%s.vmdk' % ( self._ds.name, self._image_id, self._image_id) self.assertEqual(cache_image_path, str(vi.cache_image_path)) cache_image_folder = '[%s] vmware_base/%s' % ( self._ds.name, self._image_id) self.assertEqual(cache_image_folder, str(vi.cache_image_folder)) def test_get_spawn_vm_config_info(self): image_size = (self._instance.root_gb) * units.Gi / 2 self._test_get_spawn_vm_config_info(image_size_bytes=image_size) def test_get_spawn_vm_config_info_image_too_big(self): image_size = (self._instance.root_gb + 1) * units.Gi self.assertRaises(exception.InstanceUnacceptable, self._test_get_spawn_vm_config_info, image_size_bytes=image_size) def test_spawn(self): self._test_spawn() def test_spawn_config_drive_enabled(self): self.flags(force_config_drive=True) self._test_spawn(config_drive=True) def test_spawn_with_block_device_info(self): block_device_info = { 'block_device_mapping': [{'boot_index': 0, 'connection_info': 'fake', 'mount_device': '/dev/vda'}] } self._test_spawn(block_device_info=block_device_info) def test_spawn_with_block_device_info_with_config_drive(self): self.flags(force_config_drive=True) block_device_info = { 'block_device_mapping': [{'boot_index': 0, 'connection_info': 'fake', 'mount_device': '/dev/vda'}] } self._test_spawn(block_device_info=block_device_info, config_drive=True) def _spawn_with_block_device_info_ephemerals(self, ephemerals): block_device_info = {'ephemerals': ephemerals} self._test_spawn(block_device_info=block_device_info) def test_spawn_with_block_device_info_ephemerals(self): ephemerals = [{'device_type': 'disk', 'disk_bus': 'virtio', 'device_name': '/dev/vdb', 'size': 1}] self._spawn_with_block_device_info_ephemerals(ephemerals) def test_spawn_with_block_device_info_ephemerals_no_disk_bus(self): ephemerals = [{'device_type': 'disk', 'disk_bus': None, 'device_name': '/dev/vdb', 'size': 1}] self._spawn_with_block_device_info_ephemerals(ephemerals) def test_spawn_with_block_device_info_swap(self): block_device_info = {'swap': {'disk_bus': None, 'swap_size': 512, 'device_name': '/dev/sdb'}} self._test_spawn(block_device_info=block_device_info) @mock.patch.object(vm_util, 'rename_vm') @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance') @mock.patch.object(vmops.VMwareVMOps, '_create_and_attach_thin_disk') @mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone') @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing') @mock.patch( 'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image') @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine') @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info') @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs') @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_with_ephemerals_and_swap(self, from_image, get_extra_specs, get_vm_config_info, build_virtual_machine, enlist_image, fetch_image, use_disk_image, create_and_attach_thin_disk, power_on_instance, rename_vm): self._instance.flavor = objects.Flavor(vcpus=1, memory_mb=512, name="m1.tiny", root_gb=1, ephemeral_gb=1, swap=512, extra_specs={}) extra_specs = self._vmops._get_extra_specs(self._instance.flavor) ephemerals = [{'device_type': 'disk', 'disk_bus': None, 'device_name': '/dev/vdb', 'size': 1}, {'device_type': 'disk', 'disk_bus': None, 'device_name': '/dev/vdc', 'size': 1}] swap = {'disk_bus': None, 'swap_size': 512, 'device_name': '/dev/vdd'} bdi = {'block_device_mapping': [], 'root_device_name': '/dev/sda', 'ephemerals': ephemerals, 'swap': swap} 
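# Each ephemeral entry and the swap entry should yield one thin disk in
# the instance folder, e.g. (illustrative paths, assuming a datastore
# named 'fake_ds'):
#   [fake_ds] <instance-uuid>/ephemeral_0.vmdk   (size in MiB)
#   [fake_ds] <instance-uuid>/ephemeral_1.vmdk   (size in MiB)
#   [fake_ds] <instance-uuid>/swap.vmdk          (swap_size in KiB)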
metadata = self._vmops._get_instance_metadata(self._context, self._instance) self.flags(enabled=False, group='vnc') self.flags(flat_injected=False) image_size = (self._instance.root_gb) * units.Gi / 2 image_info = images.VMwareImage( image_id=self._image_id, file_size=image_size) vi = get_vm_config_info.return_value from_image.return_value = image_info build_virtual_machine.return_value = 'fake-vm-ref' self._vmops.spawn(self._context, self._instance, {}, injected_files=None, admin_password=None, network_info=[], block_device_info=bdi) from_image.assert_called_once_with(self._instance.image_ref, {}) get_vm_config_info.assert_called_once_with(self._instance, image_info, extra_specs) build_virtual_machine.assert_called_once_with(self._instance, image_info, vi.dc_info, vi.datastore, [], extra_specs, metadata) enlist_image.assert_called_once_with(image_info.image_id, vi.datastore, vi.dc_info.ref) fetch_image.assert_called_once_with(self._context, vi) use_disk_image.assert_called_once_with('fake-vm-ref', vi) # _create_and_attach_thin_disk should be called for each ephemeral # and swap disk eph0_path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid, 'ephemeral_0.vmdk')) eph1_path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid, 'ephemeral_1.vmdk')) swap_path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid, 'swap.vmdk')) create_and_attach_thin_disk.assert_has_calls([ mock.call(self._instance, 'fake-vm-ref', vi.dc_info, ephemerals[0]['size'] * units.Mi, vi.ii.adapter_type, eph0_path), mock.call(self._instance, 'fake-vm-ref', vi.dc_info, ephemerals[1]['size'] * units.Mi, vi.ii.adapter_type, eph1_path), mock.call(self._instance, 'fake-vm-ref', vi.dc_info, swap['swap_size'] * units.Ki, vi.ii.adapter_type, swap_path) ]) power_on_instance.assert_called_once_with(self._session, self._instance, vm_ref='fake-vm-ref') def _get_fake_vi(self): image_info = images.VMwareImage( image_id=self._image_id, file_size=7, linked_clone=False) vi = vmops.VirtualMachineInstanceConfigInfo( self._instance, image_info, self._ds, self._dc_info, mock.Mock()) return vi @mock.patch.object(vm_util, 'create_virtual_disk') def test_create_and_attach_thin_disk(self, mock_create): vi = self._get_fake_vi() self._vmops._volumeops = mock.Mock() mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid, 'fake-filename')) self._vmops._create_and_attach_thin_disk(self._instance, 'fake-vm-ref', vi.dc_info, 1, 'fake-adapter-type', path) mock_create.assert_called_once_with( self._session, self._dc_info.ref, 'fake-adapter-type', 'thin', path, 1) mock_attach_disk_to_vm.assert_called_once_with( 'fake-vm-ref', self._instance, 'fake-adapter-type', 'thin', path, 1, False) def test_create_ephemeral_with_bdi(self): ephemerals = [{'device_type': 'disk', 'disk_bus': 'virtio', 'device_name': '/dev/vdb', 'size': 1}] block_device_info = {'ephemerals': ephemerals} vi = self._get_fake_vi() with mock.patch.object( self._vmops, '_create_and_attach_thin_disk') as mock_caa: self._vmops._create_ephemeral(block_device_info, self._instance, 'fake-vm-ref', vi.dc_info, vi.datastore, self._uuid, vi.ii.adapter_type) mock_caa.assert_called_once_with( self._instance, 'fake-vm-ref', vi.dc_info, 1 * units.Mi, 'virtio', '[fake_ds] %s/ephemeral_0.vmdk' % self._uuid) def _test_create_ephemeral_from_instance(self, bdi): vi = self._get_fake_vi() with mock.patch.object( self._vmops, '_create_and_attach_thin_disk') as mock_caa: self._vmops._create_ephemeral(bdi, self._instance, 
'fake-vm-ref', vi.dc_info, vi.datastore, self._uuid, vi.ii.adapter_type) mock_caa.assert_called_once_with( self._instance, 'fake-vm-ref', vi.dc_info, 1 * units.Mi, constants.DEFAULT_ADAPTER_TYPE, '[fake_ds] %s/ephemeral_0.vmdk' % self._uuid) def test_create_ephemeral_with_bdi_but_no_ephemerals(self): block_device_info = {'ephemerals': []} self._instance.ephemeral_gb = 1 self._test_create_ephemeral_from_instance(block_device_info) def test_create_ephemeral_with_no_bdi(self): self._instance.ephemeral_gb = 1 self._test_create_ephemeral_from_instance(None) def _test_create_swap_from_instance(self, bdi): vi = self._get_fake_vi() flavor = objects.Flavor(vcpus=1, memory_mb=1024, ephemeral_gb=1, swap=1024, extra_specs={}) self._instance.flavor = flavor with mock.patch.object( self._vmops, '_create_and_attach_thin_disk' ) as create_and_attach: self._vmops._create_swap(bdi, self._instance, 'fake-vm-ref', vi.dc_info, vi.datastore, self._uuid, 'lsiLogic') size = flavor.swap * units.Ki if bdi is not None: swap = bdi.get('swap', {}) size = swap.get('swap_size', 0) * units.Ki path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid, 'swap.vmdk')) create_and_attach.assert_called_once_with(self._instance, 'fake-vm-ref', vi.dc_info, size, 'lsiLogic', path) def test_create_swap_with_bdi(self): block_device_info = {'swap': {'disk_bus': None, 'swap_size': 512, 'device_name': '/dev/sdb'}} self._test_create_swap_from_instance(block_device_info) def test_create_swap_with_no_bdi(self): self._test_create_swap_from_instance(None) @mock.patch.object(vmops.VMwareVMOps, '_create_folders', return_value='fake_vm_folder') def test_build_virtual_machine(self, mock_create_folder): image_id = nova.tests.unit.image.fake.get_valid_image_id() image = images.VMwareImage(image_id=image_id) extra_specs = vm_util.ExtraSpecs() vm_ref = self._vmops.build_virtual_machine(self._instance, image, self._dc_info, self._ds, self.network_info, extra_specs, self._metadata) vm = vmwareapi_fake._get_object(vm_ref) # Test basic VM parameters self.assertEqual(self._instance.uuid, vm.name) self.assertEqual(self._instance.uuid, vm.get('summary.config.instanceUuid')) self.assertEqual(self._instance_values['vcpus'], vm.get('summary.config.numCpu')) self.assertEqual(self._instance_values['memory_mb'], vm.get('summary.config.memorySizeMB')) # Test NSX config for optval in vm.get('config.extraConfig').OptionValue: if optval.key == 'nvp.vm-uuid': self.assertEqual(self._instance_values['uuid'], optval.value) break else: self.fail('nvp.vm-uuid not found in extraConfig') # Test that the VM is associated with the specified datastore datastores = vm.datastore.ManagedObjectReference self.assertEqual(1, len(datastores)) datastore = vmwareapi_fake._get_object(datastores[0]) self.assertEqual(self._ds.name, datastore.get('summary.name')) # Test that the VM's network is configured as specified devices = vm.get('config.hardware.device').VirtualDevice for device in devices: if device.obj_name != 'ns0:VirtualE1000': continue self.assertEqual(self._network_values['address'], device.macAddress) break else: self.fail('NIC not configured') def test_spawn_cpu_limit(self): cpu_limits = vm_util.Limits(limit=7) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_cpu_reservation(self): cpu_limits = vm_util.Limits(reservation=7) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_cpu_allocations(self): cpu_limits = vm_util.Limits(limit=7, reservation=6) 
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_cpu_shares_level(self): cpu_limits = vm_util.Limits(shares_level='high') extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_cpu_shares_custom(self): cpu_limits = vm_util.Limits(shares_level='custom', shares_share=1948) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_memory_limit(self): memory_limits = vm_util.Limits(limit=7) extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_memory_reservation(self): memory_limits = vm_util.Limits(reservation=7) extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_memory_allocations(self): memory_limits = vm_util.Limits(limit=7, reservation=6) extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_memory_shares_level(self): memory_limits = vm_util.Limits(shares_level='high') extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_memory_shares_custom(self): memory_limits = vm_util.Limits(shares_level='custom', shares_share=1948) extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_vif_limit(self): vif_limits = vm_util.Limits(limit=7) extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_vif_reservation(self): vif_limits = vm_util.Limits(reservation=7) extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_vif_shares_level(self): vif_limits = vm_util.Limits(shares_level='high') extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits) self._test_spawn(extra_specs=extra_specs) def test_spawn_vif_shares_custom(self): vif_limits = vm_util.Limits(shares_level='custom', shares_share=1948) extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits) self._test_spawn(extra_specs=extra_specs) def _validate_extra_specs(self, expected, actual): self.assertEqual(expected.cpu_limits.limit, actual.cpu_limits.limit) self.assertEqual(expected.cpu_limits.reservation, actual.cpu_limits.reservation) self.assertEqual(expected.cpu_limits.shares_level, actual.cpu_limits.shares_level) self.assertEqual(expected.cpu_limits.shares_share, actual.cpu_limits.shares_share) def _validate_flavor_extra_specs(self, flavor_extra_specs, expected): # Validate that the extra specs are parsed correctly flavor = objects.Flavor(name='my-flavor', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs=flavor_extra_specs) flavor_extra_specs = self._vmops._get_extra_specs(flavor, None) self._validate_extra_specs(expected, flavor_extra_specs) def test_extra_specs_cpu_limit(self): flavor_extra_specs = {'quota:cpu_limit': 7} cpu_limits = vm_util.Limits(limit=7) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs) def test_extra_specs_cpu_reservations(self): flavor_extra_specs = {'quota:cpu_reservation': 7} cpu_limits = vm_util.Limits(reservation=7) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs) def test_extra_specs_cpu_allocations(self): flavor_extra_specs = 
{'quota:cpu_limit': 7, 'quota:cpu_reservation': 6} cpu_limits = vm_util.Limits(limit=7, reservation=6) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs) def test_extra_specs_cpu_shares_level(self): flavor_extra_specs = {'quota:cpu_shares_level': 'high'} cpu_limits = vm_util.Limits(shares_level='high') extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs) def test_extra_specs_cpu_shares_custom(self): flavor_extra_specs = {'quota:cpu_shares_level': 'custom', 'quota:cpu_shares_share': 1948} cpu_limits = vm_util.Limits(shares_level='custom', shares_share=1948) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs) def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False): disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk else constants.DEFAULT_DISK_TYPE) file_type = (constants.DISK_FORMAT_ISO if is_iso else constants.DEFAULT_DISK_FORMAT) image_info = images.VMwareImage( image_id=self._image_id, file_size=10 * units.Mi, file_type=file_type, disk_type=disk_type, linked_clone=True) cache_root_folder = self._ds.build_path("vmware_base", self._image_id) mock_imagecache = mock.Mock() mock_imagecache.get_image_cache_folder.return_value = cache_root_folder vi = vmops.VirtualMachineInstanceConfigInfo( self._instance, image_info, self._ds, self._dc_info, mock_imagecache) return vi @mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder') @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file') @mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image') @mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image') @mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image') @mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image') @mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image') @mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image') @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file') def _test_fetch_image_if_missing(self, mock_delete_datastore_file, mock_cache_flat_image, mock_cache_sparse_image, mock_cache_iso_image, mock_prepare_flat_image, mock_prepare_sparse_image, mock_prepare_iso_image, mock_fetch_image_as_file, mock_check_cache_folder, is_iso=False, is_sparse_disk=False): tmp_dir_path = mock.Mock() tmp_image_path = mock.Mock() if is_iso: mock_prepare = mock_prepare_iso_image mock_cache = mock_cache_iso_image elif is_sparse_disk: mock_prepare = mock_prepare_sparse_image mock_cache = mock_cache_sparse_image else: mock_prepare = mock_prepare_flat_image mock_cache = mock_cache_flat_image mock_prepare.return_value = tmp_dir_path, tmp_image_path vi = self._make_vm_config_info(is_iso, is_sparse_disk) self._vmops._fetch_image_if_missing(self._context, vi) mock_check_cache_folder.assert_called_once_with( self._ds.name, self._ds.ref) mock_prepare.assert_called_once_with(vi) mock_fetch_image_as_file.assert_called_once_with( self._context, vi, tmp_image_path) mock_cache.assert_called_once_with(vi, tmp_image_path) mock_delete_datastore_file.assert_called_once_with( str(tmp_dir_path), self._dc_info.ref) def test_fetch_image_if_missing(self): self._test_fetch_image_if_missing() def test_fetch_image_if_missing_with_sparse(self): self._test_fetch_image_if_missing( is_sparse_disk=True) def test_fetch_image_if_missing_with_iso(self): self._test_fetch_image_if_missing( is_iso=True) def test_get_esx_host_and_cookies(self): datastore = mock.Mock() 
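# Transfers that go directly to an ESX host authenticate with a generic
# service ticket: the ticket id is presented as a 'vmware_cgi_ticket'
# cookie whose value is the id wrapped in double quotes, which is what
# the assertions below verify.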
datastore.get_connected_hosts.return_value = ['fira-host'] file_path = mock.Mock() def fake_invoke(module, method, *args, **kwargs): if method == 'AcquireGenericServiceTicket': ticket = mock.Mock() ticket.id = 'fira-ticket' return ticket elif method == 'get_object_property': return 'fira-host' with mock.patch.object(self._session, 'invoke_api', fake_invoke): result = self._vmops._get_esx_host_and_cookies(datastore, 'ha-datacenter', file_path) self.assertEqual('fira-host', result[0]) cookies = result[1] self.assertEqual(1, len(cookies)) self.assertEqual('vmware_cgi_ticket', cookies[0].name) self.assertEqual('"fira-ticket"', cookies[0].value) @mock.patch.object(images, 'fetch_image') @mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies') def test_fetch_image_as_file(self, mock_get_esx_host_and_cookies, mock_fetch_image): vi = self._make_vm_config_info() image_ds_loc = mock.Mock() host = mock.Mock() dc_name = 'ha-datacenter' cookies = mock.Mock() mock_get_esx_host_and_cookies.return_value = host, cookies self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc) mock_get_esx_host_and_cookies.assert_called_once_with( vi.datastore, dc_name, image_ds_loc.rel_path) mock_fetch_image.assert_called_once_with( self._context, vi.instance, host, self._session._port, dc_name, self._ds.name, image_ds_loc.rel_path, cookies=cookies) @mock.patch.object(images, 'fetch_image') @mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies') def test_fetch_image_as_file_exception(self, mock_get_esx_host_and_cookies, mock_fetch_image): vi = self._make_vm_config_info() image_ds_loc = mock.Mock() dc_name = 'ha-datacenter' mock_get_esx_host_and_cookies.side_effect = \ exception.HostNotFound(host='') self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc) mock_get_esx_host_and_cookies.assert_called_once_with( vi.datastore, dc_name, image_ds_loc.rel_path) mock_fetch_image.assert_called_once_with( self._context, vi.instance, self._session._host, self._session._port, self._dc_info.name, self._ds.name, image_ds_loc.rel_path, cookies='Fake-CookieJar') @mock.patch.object(images, 'fetch_image_stream_optimized', return_value=123) def test_fetch_image_as_vapp(self, mock_fetch_image): vi = self._make_vm_config_info() image_ds_loc = mock.Mock() image_ds_loc.parent.basename = 'fake-name' self._vmops._fetch_image_as_vapp(self._context, vi, image_ds_loc) mock_fetch_image.assert_called_once_with( self._context, vi.instance, self._session, 'fake-name', self._ds.name, vi.dc_info.vmFolder, self._vmops._root_resource_pool) self.assertEqual(vi.ii.file_size, 123) @mock.patch.object(images, 'fetch_image_ova', return_value=123) def test_fetch_image_as_ova(self, mock_fetch_image): vi = self._make_vm_config_info() image_ds_loc = mock.Mock() image_ds_loc.parent.basename = 'fake-name' self._vmops._fetch_image_as_ova(self._context, vi, image_ds_loc) mock_fetch_image.assert_called_once_with( self._context, vi.instance, self._session, 'fake-name', self._ds.name, vi.dc_info.vmFolder, self._vmops._root_resource_pool) self.assertEqual(vi.ii.file_size, 123) @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid') def test_prepare_iso_image(self, mock_generate_uuid): vi = self._make_vm_config_info(is_iso=True) tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi) expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name) expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % ( self._ds.name, self._image_id, self._image_id) self.assertEqual(str(tmp_dir_loc), 
expected_tmp_dir_path) self.assertEqual(str(tmp_image_ds_loc), expected_image_path) @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid') def test_prepare_sparse_image(self, mock_generate_uuid): vi = self._make_vm_config_info(is_sparse_disk=True) tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi) expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name) expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % ( self._ds.name, self._image_id, "tmp-sparse.vmdk") self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path) self.assertEqual(str(tmp_image_ds_loc), expected_image_path) @mock.patch.object(ds_util, 'mkdir') @mock.patch.object(vm_util, 'create_virtual_disk') @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file') @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid') def test_prepare_flat_image(self, mock_generate_uuid, mock_delete_datastore_file, mock_create_virtual_disk, mock_mkdir): vi = self._make_vm_config_info() tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi) expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name) expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % ( self._ds.name, self._image_id, self._image_id) expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % ( self._ds.name, self._image_id) expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % ( self._ds.name, self._image_id, self._image_id) mock_mkdir.assert_called_once_with( self._session, DsPathMatcher(expected_image_path_parent), self._dc_info.ref) self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path) self.assertEqual(str(tmp_image_ds_loc), expected_image_path) image_info = vi.ii mock_create_virtual_disk.assert_called_once_with( self._session, self._dc_info.ref, image_info.adapter_type, image_info.disk_type, DsPathMatcher(expected_path_to_create), image_info.file_size_in_kb) mock_delete_datastore_file.assert_called_once_with( DsPathMatcher(expected_image_path), self._dc_info.ref) @mock.patch.object(ds_util, 'file_move') def test_cache_iso_image(self, mock_file_move): vi = self._make_vm_config_info(is_iso=True) tmp_image_ds_loc = mock.Mock() self._vmops._cache_iso_image(vi, tmp_image_ds_loc) mock_file_move.assert_called_once_with( self._session, self._dc_info.ref, tmp_image_ds_loc.parent, DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id)) @mock.patch.object(ds_util, 'file_move') def test_cache_flat_image(self, mock_file_move): vi = self._make_vm_config_info() tmp_image_ds_loc = mock.Mock() self._vmops._cache_flat_image(vi, tmp_image_ds_loc) mock_file_move.assert_called_once_with( self._session, self._dc_info.ref, tmp_image_ds_loc.parent, DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id)) @mock.patch.object(ds_util, 'disk_move') @mock.patch.object(ds_util, 'mkdir') def test_cache_stream_optimized_image(self, mock_mkdir, mock_disk_move): vi = self._make_vm_config_info() self._vmops._cache_stream_optimized_image(vi, mock.sentinel.tmp_image) mock_mkdir.assert_called_once_with( self._session, DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id), self._dc_info.ref) mock_disk_move.assert_called_once_with( self._session, self._dc_info.ref, mock.sentinel.tmp_image, DsPathMatcher('[fake_ds] vmware_base/%s/%s.vmdk' % (self._image_id, self._image_id))) @mock.patch.object(ds_util, 'file_move') @mock.patch.object(vm_util, 'copy_virtual_disk') @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file') @mock.patch.object(vmops.VMwareVMOps, 
'_update_image_size') def test_cache_sparse_image(self, mock_update_image_size, mock_delete_datastore_file, mock_copy_virtual_disk, mock_file_move): vi = self._make_vm_config_info(is_sparse_disk=True) sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % ( self._ds.name, self._image_id) tmp_image_ds_loc = ds_obj.DatastorePath.parse(sparse_disk_path) self._vmops._cache_sparse_image(vi, tmp_image_ds_loc) target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % ( self._ds.name, self._image_id, self._image_id) mock_copy_virtual_disk.assert_called_once_with( self._session, self._dc_info.ref, sparse_disk_path, DsPathMatcher(target_disk_path)) mock_update_image_size.assert_called_once_with(vi) def test_get_storage_policy_none(self): flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs={}) self.flags(pbm_enabled=True, pbm_default_policy='fake-policy', group='vmware') extra_specs = self._vmops._get_extra_specs(flavor, None) self.assertEqual('fake-policy', extra_specs.storage_policy) def test_get_storage_policy_extra_specs(self): extra_specs = {'vmware:storage_policy': 'flavor-policy'} flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs=extra_specs) self.flags(pbm_enabled=True, pbm_default_policy='default-policy', group='vmware') extra_specs = self._vmops._get_extra_specs(flavor, None) self.assertEqual('flavor-policy', extra_specs.storage_policy) def test_get_base_folder_not_set(self): self.flags(image_cache_subdirectory_name='vmware_base') base_folder = self._vmops._get_base_folder() self.assertEqual('vmware_base', base_folder) def test_get_base_folder_host_ip(self): self.flags(my_ip='7.7.7.7', image_cache_subdirectory_name='_base') base_folder = self._vmops._get_base_folder() self.assertEqual('7.7.7.7_base', base_folder) def test_get_base_folder_cache_prefix(self): self.flags(cache_prefix='my_prefix', group='vmware') self.flags(image_cache_subdirectory_name='_base') base_folder = self._vmops._get_base_folder() self.assertEqual('my_prefix_base', base_folder) def _test_reboot_vm(self, reboot_type="SOFT"): expected_methods = ['get_object_properties_dict'] if reboot_type == "SOFT": expected_methods.append('RebootGuest') else: expected_methods.append('ResetVM_Task') query = {} query['runtime.powerState'] = "poweredOn" query['summary.guest.toolsStatus'] = "toolsOk" query['summary.guest.toolsRunningStatus'] = "guestToolsRunning" def fake_call_method(module, method, *args, **kwargs): expected_method = expected_methods.pop(0) self.assertEqual(expected_method, method) if expected_method == 'get_object_properties_dict': return query elif expected_method == 'ResetVM_Task': return 'fake-task' with test.nested( mock.patch.object(vm_util, "get_vm_ref", return_value='fake-vm-ref'), mock.patch.object(self._session, "_call_method", fake_call_method), mock.patch.object(self._session, "_wait_for_task") ) as (_get_vm_ref, fake_call_method, _wait_for_task): self._vmops.reboot(self._instance, self.network_info, reboot_type) _get_vm_ref.assert_called_once_with(self._session, self._instance) if reboot_type == "HARD": _wait_for_task.assert_has_calls([ mock.call('fake-task')]) def test_reboot_vm_soft(self): self._test_reboot_vm() def test_reboot_vm_hard(self): self._test_reboot_vm(reboot_type="HARD") def test_get_instance_metadata(self): flavor = objects.Flavor(id=7, name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs={}) 
self._instance.flavor = flavor metadata = self._vmops._get_instance_metadata( self._context, self._instance) expected = ("name:fake_display_name\n" "userid:fake_user\n" "username:None\n" "projectid:fake_project\n" "projectname:None\n" "flavor:name:m1.small\n" "flavor:memory_mb:6\n" "flavor:vcpus:28\n" "flavor:ephemeral_gb:8128\n" "flavor:root_gb:496\n" "flavor:swap:33550336\n" "imageid:70a599e0-31e7-49b7-b260-868f441e862b\n" "package:%s\n" % version.version_string_with_package()) self.assertEqual(expected, metadata) @mock.patch.object(vm_util, 'reconfigure_vm') @mock.patch.object(vm_util, 'get_network_attach_config_spec', return_value='fake-attach-spec') @mock.patch.object(vm_util, 'get_attach_port_index', return_value=1) @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') def test_attach_interface(self, mock_get_vm_ref, mock_get_attach_port_index, mock_get_network_attach_config_spec, mock_reconfigure_vm): _network_api = mock.Mock() self._vmops._network_api = _network_api vif_info = vif.get_vif_dict(self._session, self._cluster, 'VirtualE1000', utils.is_neutron(), self._network_values) self._vmops.attach_interface(self._instance, self._image_meta, self._network_values) mock_get_vm_ref.assert_called_once_with(self._session, self._instance) mock_get_attach_port_index.assert_called_once_with(self._session, 'fake-ref') mock_get_network_attach_config_spec.assert_called_once_with( self._session.vim.client.factory, vif_info, 1) mock_reconfigure_vm.assert_called_once_with(self._session, 'fake-ref', 'fake-attach-spec') _network_api.update_instance_vnic_index.assert_called_once_with( mock.ANY, self._instance, self._network_values, 1) @mock.patch.object(vif, 'get_network_device', return_value='device') @mock.patch.object(vm_util, 'reconfigure_vm') @mock.patch.object(vm_util, 'get_network_detach_config_spec', return_value='fake-detach-spec') @mock.patch.object(vm_util, 'get_vm_detach_port_index', return_value=1) @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') def test_detach_interface(self, mock_get_vm_ref, mock_get_detach_port_index, mock_get_network_detach_config_spec, mock_reconfigure_vm, mock_get_network_device): _network_api = mock.Mock() self._vmops._network_api = _network_api with mock.patch.object(self._session, '_call_method', return_value='hardware-devices'): self._vmops.detach_interface(self._instance, self._network_values) mock_get_vm_ref.assert_called_once_with(self._session, self._instance) mock_get_detach_port_index.assert_called_once_with(self._session, 'fake-ref') mock_get_network_detach_config_spec.assert_called_once_with( self._session.vim.client.factory, 'device', 1) mock_reconfigure_vm.assert_called_once_with(self._session, 'fake-ref', 'fake-detach-spec') _network_api.update_instance_vnic_index.assert_called_once_with( mock.ANY, self._instance, self._network_values, None) @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') def test_get_mks_console(self, mock_get_vm_ref): ticket = mock.MagicMock() ticket.host = 'esx1' ticket.port = 902 ticket.ticket = 'fira' ticket.sslThumbprint = 'aa:bb:cc:dd:ee:ff' ticket.cfgFile = '[ds1] fira/foo.vmx' with mock.patch.object(self._session, '_call_method', return_value=ticket): console = self._vmops.get_mks_console(self._instance) self.assertEqual('esx1', console.host) self.assertEqual(902, console.port) path = jsonutils.loads(console.internal_access_path) self.assertEqual('fira', path['ticket']) self.assertEqual('aabbccddeeff', path['thumbprint']) self.assertEqual('[ds1] fira/foo.vmx', path['cfgFile']) def test_get_cores_per_socket(self): extra_specs = {'hw:cpu_sockets': 7} flavor = 
objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs=extra_specs) extra_specs = self._vmops._get_extra_specs(flavor, None) self.assertEqual(4, int(extra_specs.cores_per_socket)) def test_get_folder_name(self): uuid = uuidutils.generate_uuid() name = 'fira' expected = 'fira (%s)' % uuid folder_name = self._vmops._get_folder_name(name, uuid) self.assertEqual(expected, folder_name) name = 'X' * 255 expected = '%s (%s)' % ('X' * 40, uuid) folder_name = self._vmops._get_folder_name(name, uuid) self.assertEqual(expected, folder_name) self.assertEqual(79, len(folder_name)) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_read_write_util.py0000664000567000056710000000353713064447152026560 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib import mock from nova import test from nova.virt.vmwareapi import read_write_util class ReadWriteUtilTestCase(test.NoDBTestCase): def test_ipv6_host_read(self): ipv6_host = 'fd8c:215d:178e:c51e:200:c9ff:fed1:584c' port = 7443 folder = 'tmp/fake.txt' # NOTE(sdague): the VMwareHTTPReadFile makes implicit http # call via requests during construction, block that from # happening here in the test. with mock.patch.object(read_write_util.VMwareHTTPReadFile, '_create_read_connection'): reader = read_write_util.VMwareHTTPReadFile(ipv6_host, port, 'fake_dc', 'fake_ds', dict(), folder) param_list = {"dcPath": 'fake_dc', "dsName": 'fake_ds'} base_url = 'https://[%s]:%s/folder/%s' % (ipv6_host, port, folder) base_url += '?' + urllib.urlencode(param_list) self.assertEqual(base_url, reader._base_url) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_ds_util.py0000664000567000056710000005206213064447152025036 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
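# NOTE(editor): illustrative aside, not part of the original source tree. The
# test_read_write_util.py case above silences a network side effect that
# happens inside a constructor by patching the helper method for the duration
# of the construction. A minimal, self-contained sketch of the same pattern;
# the class and method names below are invented for illustration only:

import mock


class RemoteFile(object):
    def __init__(self, host):
        self.host = host
        # In the real-world analogue this would open an HTTP connection.
        self._conn = self._create_connection()

    def _create_connection(self):
        raise RuntimeError('network access not allowed in unit tests')


def build_without_network():
    # Patching the method on the class keeps the constructor off the
    # network while the rest of __init__ still runs normally.
    with mock.patch.object(RemoteFile, '_create_connection'):
        reader = RemoteFile('fd8c::1')
    return reader.host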
import re import mock from oslo_utils import units from oslo_vmware import exceptions as vexc from oslo_vmware.objects import datastore as ds_obj from nova import exception from nova import test from nova.tests.unit.virt.vmwareapi import fake from nova.virt.vmwareapi import ds_util class DsUtilTestCase(test.NoDBTestCase): def setUp(self): super(DsUtilTestCase, self).setUp() self.session = fake.FakeSession() self.flags(api_retry_count=1, group='vmware') fake.reset() def tearDown(self): super(DsUtilTestCase, self).tearDown() fake.reset() def test_file_delete(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('DeleteDatastoreFile_Task', method) name = kwargs.get('name') self.assertEqual('[ds] fake/path', name) datacenter = kwargs.get('datacenter') self.assertEqual('fake-dc-ref', datacenter) return 'fake_delete_task' with test.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): ds_path = ds_obj.DatastorePath('ds', 'fake/path') ds_util.file_delete(self.session, ds_path, 'fake-dc-ref') _wait_for_task.assert_has_calls([ mock.call('fake_delete_task')]) def test_file_copy(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('CopyDatastoreFile_Task', method) src_name = kwargs.get('sourceName') self.assertEqual('[ds] fake/path/src_file', src_name) src_dc_ref = kwargs.get('sourceDatacenter') self.assertEqual('fake-src-dc-ref', src_dc_ref) dst_name = kwargs.get('destinationName') self.assertEqual('[ds] fake/path/dst_file', dst_name) dst_dc_ref = kwargs.get('destinationDatacenter') self.assertEqual('fake-dst-dc-ref', dst_dc_ref) return 'fake_copy_task' with test.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): src_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'src_file') dst_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'dst_file') ds_util.file_copy(self.session, str(src_ds_path), 'fake-src-dc-ref', str(dst_ds_path), 'fake-dst-dc-ref') _wait_for_task.assert_has_calls([ mock.call('fake_copy_task')]) def test_file_move(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('MoveDatastoreFile_Task', method) sourceName = kwargs.get('sourceName') self.assertEqual('[ds] tmp/src', sourceName) destinationName = kwargs.get('destinationName') self.assertEqual('[ds] base/dst', destinationName) sourceDatacenter = kwargs.get('sourceDatacenter') self.assertEqual('fake-dc-ref', sourceDatacenter) destinationDatacenter = kwargs.get('destinationDatacenter') self.assertEqual('fake-dc-ref', destinationDatacenter) return 'fake_move_task' with test.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): src_ds_path = ds_obj.DatastorePath('ds', 'tmp/src') dst_ds_path = ds_obj.DatastorePath('ds', 'base/dst') ds_util.file_move(self.session, 'fake-dc-ref', src_ds_path, dst_ds_path) _wait_for_task.assert_has_calls([ mock.call('fake_move_task')]) def test_disk_move(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('MoveVirtualDisk_Task', method) src_name = kwargs.get('sourceName') self.assertEqual('[ds] tmp/src', src_name) dest_name = kwargs.get('destName') self.assertEqual('[ds] base/dst', dest_name) src_datacenter = kwargs.get('sourceDatacenter') self.assertEqual('fake-dc-ref', src_datacenter) 
dest_datacenter = kwargs.get('destDatacenter') self.assertEqual('fake-dc-ref', dest_datacenter) return 'fake_move_task' with test.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): ds_util.disk_move(self.session, 'fake-dc-ref', '[ds] tmp/src', '[ds] base/dst') _wait_for_task.assert_has_calls([ mock.call('fake_move_task')]) def test_disk_copy(self): with test.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', return_value=mock.sentinel.cm) ) as (_wait_for_task, _call_method): ds_util.disk_copy(self.session, mock.sentinel.dc_ref, mock.sentinel.source_ds, mock.sentinel.dest_ds) _wait_for_task.assert_called_once_with(mock.sentinel.cm) _call_method.assert_called_once_with( mock.ANY, 'CopyVirtualDisk_Task', 'VirtualDiskManager', sourceName='sentinel.source_ds', destDatacenter=mock.sentinel.dc_ref, sourceDatacenter=mock.sentinel.dc_ref, force=False, destName='sentinel.dest_ds') def test_disk_delete(self): with test.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', return_value=mock.sentinel.cm) ) as (_wait_for_task, _call_method): ds_util.disk_delete(self.session, 'fake-dc-ref', '[ds] tmp/disk.vmdk') _wait_for_task.assert_called_once_with(mock.sentinel.cm) _call_method.assert_called_once_with( mock.ANY, 'DeleteVirtualDisk_Task', 'VirtualDiskManager', datacenter='fake-dc-ref', name='[ds] tmp/disk.vmdk') def test_mkdir(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('MakeDirectory', method) name = kwargs.get('name') self.assertEqual('[ds] fake/path', name) datacenter = kwargs.get('datacenter') self.assertEqual('fake-dc-ref', datacenter) createParentDirectories = kwargs.get('createParentDirectories') self.assertTrue(createParentDirectories) with mock.patch.object(self.session, '_call_method', fake_call_method): ds_path = ds_obj.DatastorePath('ds', 'fake/path') ds_util.mkdir(self.session, ds_path, 'fake-dc-ref') def test_file_exists(self): def fake_call_method(module, method, *args, **kwargs): if method == 'SearchDatastore_Task': ds_browser = args[0] self.assertEqual('fake-browser', ds_browser) datastorePath = kwargs.get('datastorePath') self.assertEqual('[ds] fake/path', datastorePath) return 'fake_exists_task' # Should never get here self.fail() def fake_wait_for_task(task_ref): if task_ref == 'fake_exists_task': result_file = fake.DataObject() result_file.path = 'fake-file' result = fake.DataObject() result.file = [result_file] result.path = '[ds] fake/path' task_info = fake.DataObject() task_info.result = result return task_info # Should never get here self.fail() with test.nested( mock.patch.object(self.session, '_call_method', fake_call_method), mock.patch.object(self.session, '_wait_for_task', fake_wait_for_task)): ds_path = ds_obj.DatastorePath('ds', 'fake/path') file_exists = ds_util.file_exists(self.session, 'fake-browser', ds_path, 'fake-file') self.assertTrue(file_exists) def test_file_exists_fails(self): def fake_call_method(module, method, *args, **kwargs): if method == 'SearchDatastore_Task': return 'fake_exists_task' # Should never get here self.fail() def fake_wait_for_task(task_ref): if task_ref == 'fake_exists_task': raise vexc.FileNotFoundException() # Should never get here self.fail() with test.nested( mock.patch.object(self.session, '_call_method', fake_call_method), mock.patch.object(self.session, '_wait_for_task', 
fake_wait_for_task)): ds_path = ds_obj.DatastorePath('ds', 'fake/path') file_exists = ds_util.file_exists(self.session, 'fake-browser', ds_path, 'fake-file') self.assertFalse(file_exists) def _mock_get_datastore_calls(self, *datastores): """Mock vim_util calls made by get_datastore.""" datastores_i = [None] # For the moment, at least, this list of datastores is simply passed to # get_properties_for_a_collection_of_objects, which we mock below. We # don't need to over-complicate the fake function by worrying about its # contents. fake_ds_list = ['fake-ds'] def fake_call_method(module, method, *args, **kwargs): # Mock the call which returns a list of datastores for the cluster if (module == ds_util.vutil and method == 'get_object_property' and args == ('fake-cluster', 'datastore')): fake_ds_mor = fake.DataObject() fake_ds_mor.ManagedObjectReference = fake_ds_list return fake_ds_mor # Return the datastore result sets we were passed in, in the order # given if (module == ds_util.vim_util and method == 'get_properties_for_a_collection_of_objects' and args[0] == 'Datastore' and args[1] == fake_ds_list): # Start a new iterator over given datastores datastores_i[0] = iter(datastores) return next(datastores_i[0]) # Continue returning results from the current iterator. if (module == ds_util.vutil and method == 'continue_retrieval'): try: return next(datastores_i[0]) except StopIteration: return None if (method == 'continue_retrieval' or method == 'cancel_retrieval'): return # Sentinel that get_datastore's use of vim has changed self.fail('Unexpected vim call in get_datastore: %s' % method) return mock.patch.object(self.session, '_call_method', side_effect=fake_call_method) def test_get_datastore(self): fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore()) fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000, False, "normal")) fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000, True, "inMaintenance")) with self._mock_get_datastore_calls(fake_objects): result = ds_util.get_datastore(self.session, 'fake-cluster') self.assertEqual("fake-ds", result.name) self.assertEqual(units.Ti, result.capacity) self.assertEqual(500 * units.Gi, result.freespace) def test_get_datastore_with_regex(self): # Test with a regex that matches with a datastore datastore_valid_regex = re.compile("^openstack.*\d$") fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore("openstack-ds0")) fake_objects.add_object(fake.Datastore("fake-ds0")) fake_objects.add_object(fake.Datastore("fake-ds1")) with self._mock_get_datastore_calls(fake_objects): result = ds_util.get_datastore(self.session, 'fake-cluster', datastore_valid_regex) self.assertEqual("openstack-ds0", result.name) def test_get_datastore_with_token(self): regex = re.compile("^ds.*\d$") fake0 = fake.FakeRetrieveResult() fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi)) fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi)) setattr(fake0, 'token', 'token-0') fake1 = fake.FakeRetrieveResult() fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi)) fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi)) with self._mock_get_datastore_calls(fake0, fake1): result = ds_util.get_datastore(self.session, 'fake-cluster', regex) self.assertEqual("ds2", result.name) def test_get_datastore_with_list(self): # Test with a regex containing whitelist of datastores datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)") fake_objects = 
fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore("openstack-ds0")) fake_objects.add_object(fake.Datastore("openstack-ds1")) fake_objects.add_object(fake.Datastore("openstack-ds2")) with self._mock_get_datastore_calls(fake_objects): result = ds_util.get_datastore(self.session, 'fake-cluster', datastore_valid_regex) self.assertNotEqual("openstack-ds1", result.name) def test_get_datastore_with_regex_error(self): # Test with a regex that has no match # Checks if code raises DatastoreNotFound with a specific message datastore_invalid_regex = re.compile("unknown-ds") exp_message = ("Datastore regex %s did not match any datastores" % datastore_invalid_regex.pattern) fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore("fake-ds0")) fake_objects.add_object(fake.Datastore("fake-ds1")) # assertRaisesRegExp would have been a good choice instead of # try/catch block, but it's available only from Py 2.7. try: with self._mock_get_datastore_calls(fake_objects): ds_util.get_datastore(self.session, 'fake-cluster', datastore_invalid_regex) except exception.DatastoreNotFound as e: self.assertEqual(exp_message, e.args[0]) else: self.fail("DatastoreNotFound Exception was not raised with " "message: %s" % exp_message) def test_get_datastore_without_datastore(self): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, fake.FakeObjectRetrievalSession(None), cluster="fake-cluster") def test_get_datastore_inaccessible_ds(self): data_store = fake.Datastore() data_store.set("summary.accessible", False) fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(data_store) with self._mock_get_datastore_calls(fake_objects): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, self.session, 'fake-cluster') def test_get_datastore_ds_in_maintenance(self): data_store = fake.Datastore() data_store.set("summary.maintenanceMode", "inMaintenance") fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(data_store) with self._mock_get_datastore_calls(fake_objects): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, self.session, 'fake-cluster') def test_get_datastore_no_host_in_cluster(self): def fake_call_method(module, method, *args, **kwargs): return '' with mock.patch.object(self.session, '_call_method', fake_call_method): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, self.session, 'fake-cluster') def _test_is_datastore_valid(self, accessible=True, maintenance_mode="normal", type="VMFS", datastore_regex=None, ds_types=ds_util.ALL_SUPPORTED_DS_TYPES): propdict = {} propdict["summary.accessible"] = accessible propdict["summary.maintenanceMode"] = maintenance_mode propdict["summary.type"] = type propdict["summary.name"] = "ds-1" return ds_util._is_datastore_valid(propdict, datastore_regex, ds_types) def test_is_datastore_valid(self): for ds_type in ds_util.ALL_SUPPORTED_DS_TYPES: self.assertTrue(self._test_is_datastore_valid(True, "normal", ds_type)) def test_is_datastore_valid_inaccessible_ds(self): self.assertFalse(self._test_is_datastore_valid(False, "normal", "VMFS")) def test_is_datastore_valid_ds_in_maintenance(self): self.assertFalse(self._test_is_datastore_valid(True, "inMaintenance", "VMFS")) def test_is_datastore_valid_ds_type_invalid(self): self.assertFalse(self._test_is_datastore_valid(True, "normal", "vfat")) def test_is_datastore_valid_not_matching_regex(self): datastore_regex = re.compile("ds-2") self.assertFalse(self._test_is_datastore_valid(True, "normal", "VMFS", 
datastore_regex)) def test_is_datastore_valid_matching_regex(self): datastore_regex = re.compile("ds-1") self.assertTrue(self._test_is_datastore_valid(True, "normal", "VMFS", datastore_regex)) def test_get_connected_hosts_none(self): with mock.patch.object(self.session, '_call_method') as _call_method: hosts = ds_util.get_connected_hosts(self.session, 'fake_datastore') self.assertEqual([], hosts) _call_method.assert_called_once_with( mock.ANY, 'get_object_property', 'fake_datastore', 'host') def test_get_connected_hosts(self): host = mock.Mock(spec=object) host.value = 'fake-host' host_mount = mock.Mock(spec=object) host_mount.key = host host_mounts = mock.Mock(spec=object) host_mounts.DatastoreHostMount = [host_mount] with mock.patch.object(self.session, '_call_method', return_value=host_mounts) as _call_method: hosts = ds_util.get_connected_hosts(self.session, 'fake_datastore') self.assertEqual(['fake-host'], hosts) _call_method.assert_called_once_with( mock.ANY, 'get_object_property', 'fake_datastore', 'host') nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_vm_util.py0000664000567000056710000024754213064447152025063 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright 2013 Canonical Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
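# NOTE(editor): illustrative aside, not part of the original source tree. The
# DsUtilTestCase tests above repeatedly swap the session's _call_method for a
# plain function so each vim RPC can be inspected and answered with a canned
# task handle. A minimal, self-contained sketch of that technique; every name
# below is invented for illustration only:

import mock


class FakeSession(object):
    def _call_method(self, module, method, *args, **kwargs):
        raise AssertionError('should be patched out in tests')

    def _wait_for_task(self, task):
        pass


def delete_path(session, path):
    task = session._call_method('vim', 'DeleteDatastoreFile_Task', name=path)
    session._wait_for_task(task)


def test_delete_path():
    session = FakeSession()

    def fake_call_method(module, method, *args, **kwargs):
        # Assert on the RPC that the code under test issues, then hand back
        # a canned task reference for _wait_for_task.
        assert method == 'DeleteDatastoreFile_Task'
        assert kwargs.get('name') == '[ds] fake/path'
        return 'fake-task'

    with mock.patch.object(session, '_call_method', fake_call_method), \
            mock.patch.object(session, '_wait_for_task') as wait:
        delete_path(session, '[ds] fake/path')
    wait.assert_called_once_with('fake-task')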
import collections import mock from oslo_utils import uuidutils from oslo_vmware import exceptions as vexc from oslo_vmware.objects import datastore as ds_obj from oslo_vmware import pbm from oslo_vmware import vim_util as vutil from nova import exception from nova.network import model as network_model from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.virt.vmwareapi import fake from nova.tests.unit.virt.vmwareapi import stubs from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import vm_util class partialObject(object): def __init__(self, path='fake-path'): self.path = path self.fault = fake.DataObject() class VMwareVMUtilTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVMUtilTestCase, self).setUp() fake.reset() stubs.set_stubs(self) vm_util.vm_refs_cache_reset() self._instance = fake_instance.fake_instance_obj( None, **{'id': 7, 'name': 'fake!', 'display_name': 'fake-display-name', 'uuid': uuidutils.generate_uuid(), 'vcpus': 2, 'memory_mb': 2048}) def _test_get_stats_from_cluster(self, connection_state="connected", maintenance_mode=False): ManagedObjectRefs = [fake.ManagedObjectReference("host1", "HostSystem"), fake.ManagedObjectReference("host2", "HostSystem")] hosts = fake._convert_to_array_of_mor(ManagedObjectRefs) respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool") prop_dict = {'host': hosts, 'resourcePool': respool} hardware = fake.DataObject() hardware.numCpuCores = 8 hardware.numCpuThreads = 16 hardware.vendor = "Intel" hardware.cpuModel = "Intel(R) Xeon(R)" runtime_host_1 = fake.DataObject() runtime_host_1.connectionState = "connected" runtime_host_1.inMaintenanceMode = False runtime_host_2 = fake.DataObject() runtime_host_2.connectionState = connection_state runtime_host_2.inMaintenanceMode = maintenance_mode prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware), fake.Prop(name="runtime_summary", val=runtime_host_1)] prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware), fake.Prop(name="runtime_summary", val=runtime_host_2)] fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.ObjectContent("prop_list_host1", prop_list_host_1)) fake_objects.add_object(fake.ObjectContent("prop_list_host1", prop_list_host_2)) respool_resource_usage = fake.DataObject() respool_resource_usage.maxUsage = 5368709120 respool_resource_usage.overallUsage = 2147483648 def fake_call_method(*args): if "get_object_properties_dict" in args: return prop_dict elif "get_properties_for_a_collection_of_objects" in args: return fake_objects else: return respool_resource_usage session = fake.FakeSession() with mock.patch.object(session, '_call_method', fake_call_method): result = vm_util.get_stats_from_cluster(session, "cluster1") mem_info = {} if connection_state == "connected" and not maintenance_mode: vcpus = 32 else: vcpus = 16 mem_info['total'] = 5120 mem_info['free'] = 3072 expected_stats = {'vcpus': vcpus, 'mem': mem_info} self.assertEqual(expected_stats, result) def test_get_stats_from_cluster_hosts_connected_and_active(self): self._test_get_stats_from_cluster() def test_get_stats_from_cluster_hosts_disconnected_and_active(self): self._test_get_stats_from_cluster(connection_state="disconnected") def test_get_stats_from_cluster_hosts_connected_and_maintenance(self): self._test_get_stats_from_cluster(maintenance_mode=True) def test_get_host_ref_no_hosts_in_cluster(self): self.assertRaises(exception.NoValidHost, vm_util.get_host_ref, 
fake.FakeObjectRetrievalSession(""), 'fake_cluster') def test_get_resize_spec(self): vcpus = 2 memory_mb = 2048 extra_specs = vm_util.ExtraSpecs() fake_factory = fake.FakeFactory() result = vm_util.get_vm_resize_spec(fake_factory, vcpus, memory_mb, extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.memoryMB = memory_mb expected.numCPUs = vcpus cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo') cpuAllocation.reservation = 0 cpuAllocation.limit = -1 cpuAllocation.shares = fake_factory.create('ns0:SharesInfo') cpuAllocation.shares.level = 'normal' cpuAllocation.shares.shares = 0 expected.cpuAllocation = cpuAllocation self.assertEqual(expected, result) def test_get_resize_spec_with_limits(self): vcpus = 2 memory_mb = 2048 cpu_limits = vm_util.Limits(limit=7, reservation=6) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_resize_spec(fake_factory, vcpus, memory_mb, extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.memoryMB = memory_mb expected.numCPUs = vcpus cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo') cpuAllocation.reservation = 6 cpuAllocation.limit = 7 cpuAllocation.shares = fake_factory.create('ns0:SharesInfo') cpuAllocation.shares.level = 'normal' cpuAllocation.shares.shares = 0 expected.cpuAllocation = cpuAllocation self.assertEqual(expected, result) def test_get_cdrom_attach_config_spec(self): fake_factory = fake.FakeFactory() datastore = fake.Datastore() result = vm_util.get_cdrom_attach_config_spec(fake_factory, datastore, "/tmp/foo.iso", 200, 0) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = 'add' device_change.device = fake_factory.create('ns0:VirtualCdrom') device_change.device.controllerKey = 200 device_change.device.unitNumber = 0 device_change.device.key = -1 connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo') connectable.allowGuestControl = False connectable.startConnected = True connectable.connected = True device_change.device.connectable = connectable backing = fake_factory.create('ns0:VirtualCdromIsoBackingInfo') backing.fileName = '/tmp/foo.iso' backing.datastore = datastore device_change.device.backing = backing expected.deviceChange.append(device_change) self.assertEqual(expected, result) def test_lsilogic_controller_spec(self): # Test controller spec returned for lsiLogic sas adapter type config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101, adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS) self.assertEqual("ns0:VirtualLsiLogicSASController", config_spec.device.obj_name) def test_paravirtual_controller_spec(self): # Test controller spec returned for paraVirtual adapter type config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101, adapter_type=constants.ADAPTER_TYPE_PARAVIRTUAL) self.assertEqual("ns0:ParaVirtualSCSIController", config_spec.device.obj_name) def test_create_controller_spec_with_specfic_bus_number(self): # Test controller spec with specifc bus number rather default 0 config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101, adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS, bus_number=1) self.assertEqual(1, config_spec.device.busNumber) def _vmdk_path_and_adapter_type_devices(self, filename, parent=None): # Test the adapter_type returned for a lsiLogic sas controller controller_key = 1000 disk = 
fake.VirtualDisk() disk.controllerKey = controller_key disk_backing = fake.VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = filename disk.capacityInBytes = 1024 if parent: disk_backing.parent = parent disk.backing = disk_backing # Ephemeral disk e_disk = fake.VirtualDisk() e_disk.controllerKey = controller_key disk_backing = fake.VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = '[test_datastore] uuid/ephemeral_0.vmdk' e_disk.capacityInBytes = 512 e_disk.backing = disk_backing controller = fake.VirtualLsiLogicSASController() controller.key = controller_key devices = [disk, e_disk, controller] return devices def test_get_vmdk_path_and_adapter_type(self): filename = '[test_datastore] uuid/uuid.vmdk' devices = self._vmdk_path_and_adapter_type_devices(filename) session = fake.FakeSession() with mock.patch.object(session, '_call_method', return_value=devices): vmdk = vm_util.get_vmdk_info(session, None) self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS, vmdk.adapter_type) self.assertEqual('[test_datastore] uuid/ephemeral_0.vmdk', vmdk.path) self.assertEqual(512, vmdk.capacity_in_bytes) self.assertEqual(devices[1], vmdk.device) def test_get_vmdk_path_and_adapter_type_with_match(self): n_filename = '[test_datastore] uuid/uuid.vmdk' devices = self._vmdk_path_and_adapter_type_devices(n_filename) session = fake.FakeSession() with mock.patch.object(session, '_call_method', return_value=devices): vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid') self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS, vmdk.adapter_type) self.assertEqual(n_filename, vmdk.path) self.assertEqual(1024, vmdk.capacity_in_bytes) self.assertEqual(devices[0], vmdk.device) def test_get_vmdk_path_and_adapter_type_with_nomatch(self): n_filename = '[test_datastore] diuu/diuu.vmdk' session = fake.FakeSession() devices = self._vmdk_path_and_adapter_type_devices(n_filename) with mock.patch.object(session, '_call_method', return_value=devices): vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid') self.assertIsNone(vmdk.adapter_type) self.assertIsNone(vmdk.path) self.assertEqual(0, vmdk.capacity_in_bytes) self.assertIsNone(vmdk.device) def test_get_vmdk_adapter_type(self): # Test for the adapter_type to be used in vmdk descriptor # Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic # and ParaVirtual vmdk_adapter_type = vm_util.get_vmdk_adapter_type( constants.DEFAULT_ADAPTER_TYPE) self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type) vmdk_adapter_type = vm_util.get_vmdk_adapter_type( constants.ADAPTER_TYPE_LSILOGICSAS) self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type) vmdk_adapter_type = vm_util.get_vmdk_adapter_type( constants.ADAPTER_TYPE_PARAVIRTUAL) self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type) vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter") self.assertEqual("dummyAdapter", vmdk_adapter_type) def test_get_scsi_adapter_type(self): vm = fake.VirtualMachine() devices = vm.get("config.hardware.device").VirtualDevice scsi_controller = fake.VirtualLsiLogicController() ide_controller = fake.VirtualIDEController() devices.append(scsi_controller) devices.append(ide_controller) fake._update_object("VirtualMachine", vm) # return the scsi type, not ide hardware_device = vm.get("config.hardware.device") self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vm_util.get_scsi_adapter_type(hardware_device)) def test_get_scsi_adapter_type_with_error(self): vm = fake.VirtualMachine() devices = vm.get("config.hardware.device").VirtualDevice 
scsi_controller = fake.VirtualLsiLogicController() ide_controller = fake.VirtualIDEController() devices.append(scsi_controller) devices.append(ide_controller) fake._update_object("VirtualMachine", vm) # the controller is not suitable since the device under this controller # has exceeded SCSI_MAX_CONNECT_NUMBER for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER): scsi_controller.device.append('device' + str(i)) hardware_device = vm.get("config.hardware.device") self.assertRaises(exception.StorageError, vm_util.get_scsi_adapter_type, hardware_device) def test_find_allocated_slots(self): disk1 = fake.VirtualDisk(200, 0) disk2 = fake.VirtualDisk(200, 1) disk3 = fake.VirtualDisk(201, 1) ide0 = fake.VirtualIDEController(200) ide1 = fake.VirtualIDEController(201) scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7) devices = [disk1, disk2, disk3, ide0, ide1, scsi0] taken = vm_util._find_allocated_slots(devices) self.assertEqual([0, 1], sorted(taken[200])) self.assertEqual([1], taken[201]) self.assertEqual([7], taken[1000]) def test_get_bus_number_for_scsi_controller(self): devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7, busNumber=0), fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7, busNumber=2)] bus_number = vm_util._get_bus_number_for_scsi_controller(devices) self.assertEqual(1, bus_number) def test_get_bus_number_for_scsi_controller_buses_used_up(self): devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7, busNumber=0), fake.VirtualLsiLogicController(1001, scsiCtlrUnitNumber=7, busNumber=1), fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7, busNumber=2), fake.VirtualLsiLogicController(1003, scsiCtlrUnitNumber=7, busNumber=3)] self.assertRaises(vexc.VMwareDriverException, vm_util._get_bus_number_for_scsi_controller, devices) def test_allocate_controller_key_and_unit_number_ide_default(self): # Test that default IDE controllers are used when there is a free slot # on them disk1 = fake.VirtualDisk(200, 0) disk2 = fake.VirtualDisk(200, 1) ide0 = fake.VirtualIDEController(200) ide1 = fake.VirtualIDEController(201) devices = [disk1, disk2, ide0, ide1] (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( None, devices, 'ide') self.assertEqual(201, controller_key) self.assertEqual(0, unit_number) self.assertIsNone(controller_spec) def test_allocate_controller_key_and_unit_number_ide(self): # Test that a new controller is created when there is no free slot on # the default IDE controllers ide0 = fake.VirtualIDEController(200) ide1 = fake.VirtualIDEController(201) devices = [ide0, ide1] for controller_key in [200, 201]: for unit_number in [0, 1]: disk = fake.VirtualDisk(controller_key, unit_number) devices.append(disk) factory = fake.FakeFactory() (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( factory, devices, 'ide') self.assertEqual(-101, controller_key) self.assertEqual(0, unit_number) self.assertIsNotNone(controller_spec) def test_allocate_controller_key_and_unit_number_scsi(self): # Test that we allocate on existing SCSI controller if there is a free # slot on it devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)] for unit_number in range(7): disk = fake.VirtualDisk(1000, unit_number) devices.append(disk) factory = fake.FakeFactory() (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( factory, devices, constants.DEFAULT_ADAPTER_TYPE) self.assertEqual(1000, 
controller_key) self.assertEqual(8, unit_number) self.assertIsNone(controller_spec) def test_allocate_controller_key_and_unit_number_scsi_new_controller(self): # Test that we allocate on existing SCSI controller if there is a free # slot on it devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=15)] for unit_number in range(15): disk = fake.VirtualDisk(1000, unit_number) devices.append(disk) factory = fake.FakeFactory() (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( factory, devices, constants.DEFAULT_ADAPTER_TYPE) self.assertEqual(-101, controller_key) self.assertEqual(0, unit_number) self.assertEqual(1, controller_spec.device.busNumber) def test_get_vnc_config_spec(self): fake_factory = fake.FakeFactory() result = vm_util.get_vnc_config_spec(fake_factory, 7) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] remote_display_vnc_enabled = fake_factory.create('ns0:OptionValue') remote_display_vnc_enabled.value = 'true' remote_display_vnc_enabled.key = 'RemoteDisplay.vnc.enabled' expected.extraConfig.append(remote_display_vnc_enabled) remote_display_vnc_port = fake_factory.create('ns0:OptionValue') remote_display_vnc_port.value = 7 remote_display_vnc_port.key = 'RemoteDisplay.vnc.port' expected.extraConfig.append(remote_display_vnc_port) remote_display_vnc_keymap = fake_factory.create('ns0:OptionValue') remote_display_vnc_keymap.value = 'en-us' remote_display_vnc_keymap.key = 'RemoteDisplay.vnc.keyMap' expected.extraConfig.append(remote_display_vnc_keymap) self.assertEqual(expected, result) def _create_fake_vms(self): fake_vms = fake.FakeRetrieveResult() OptionValue = collections.namedtuple('OptionValue', ['key', 'value']) for i in range(10): vm = fake.ManagedObject() opt_val = OptionValue(key='', value=5900 + i) vm.set(vm_util.VNC_CONFIG_KEY, opt_val) fake_vms.add_object(vm) return fake_vms def test_get_vnc_port(self): fake_vms = self._create_fake_vms() self.flags(vnc_port=5900, group='vmware') self.flags(vnc_port_total=10000, group='vmware') actual = vm_util.get_vnc_port( fake.FakeObjectRetrievalSession(fake_vms)) self.assertEqual(actual, 5910) def test_get_vnc_port_exhausted(self): fake_vms = self._create_fake_vms() self.flags(vnc_port=5900, group='vmware') self.flags(vnc_port_total=10, group='vmware') self.assertRaises(exception.ConsolePortRangeExhausted, vm_util.get_vnc_port, fake.FakeObjectRetrievalSession(fake_vms)) def test_get_cluster_ref_by_name_none(self): fake_objects = fake.FakeRetrieveResult() ref = vm_util.get_cluster_ref_by_name( fake.FakeObjectRetrievalSession(fake_objects), 'fake_cluster') self.assertIsNone(ref) def test_get_cluster_ref_by_name_exists(self): fake_objects = fake.FakeRetrieveResult() cluster = fake.ClusterComputeResource(name='cluster') fake_objects.add_object(cluster) ref = vm_util.get_cluster_ref_by_name( fake.FakeObjectRetrievalSession(fake_objects), 'cluster') self.assertIs(cluster.obj, ref) def test_get_cluster_ref_by_name_missing(self): fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(partialObject(path='cluster')) ref = vm_util.get_cluster_ref_by_name( fake.FakeObjectRetrievalSession(fake_objects), 'cluster') self.assertIsNone(ref) def test_propset_dict_simple(self): ObjectContent = collections.namedtuple('ObjectContent', ['propSet']) DynamicProperty = collections.namedtuple('Property', ['name', 'val']) object = ObjectContent(propSet=[ DynamicProperty(name='foo', val="bar")]) propdict = vm_util.propset_dict(object.propSet) 
self.assertEqual("bar", propdict['foo']) def test_propset_dict_complex(self): ObjectContent = collections.namedtuple('ObjectContent', ['propSet']) DynamicProperty = collections.namedtuple('Property', ['name', 'val']) MoRef = collections.namedtuple('Val', ['value']) object = ObjectContent(propSet=[ DynamicProperty(name='foo', val="bar"), DynamicProperty(name='some.thing', val=MoRef(value='else')), DynamicProperty(name='another.thing', val='value')]) propdict = vm_util.propset_dict(object.propSet) self.assertEqual("bar", propdict['foo']) self.assertTrue(hasattr(propdict['some.thing'], 'value')) self.assertEqual("else", propdict['some.thing'].value) self.assertEqual("value", propdict['another.thing']) def _test_detach_virtual_disk_spec(self, destroy_disk=False): virtual_device_config = vm_util.detach_virtual_disk_spec( fake.FakeFactory(), 'fake_device', destroy_disk) self.assertEqual('remove', virtual_device_config.operation) self.assertEqual('fake_device', virtual_device_config.device) self.assertEqual('ns0:VirtualDeviceConfigSpec', virtual_device_config.obj_name) if destroy_disk: self.assertEqual('destroy', virtual_device_config.fileOperation) else: self.assertFalse(hasattr(virtual_device_config, 'fileOperation')) def test_detach_virtual_disk_spec(self): self._test_detach_virtual_disk_spec(destroy_disk=False) def test_detach_virtual_disk_destroy_spec(self): self._test_detach_virtual_disk_spec(destroy_disk=True) def _create_vm_config_spec(self): fake_factory = fake.FakeFactory() spec = fake_factory.create('ns0:VirtualMachineConfigSpec') spec.name = self._instance.uuid spec.instanceUuid = self._instance.uuid spec.deviceChange = [] spec.numCPUs = 2 spec.version = None spec.memoryMB = 2048 spec.guestId = 'otherGuest' spec.extraConfig = [] extra_config = fake_factory.create("ns0:OptionValue") extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' spec.extraConfig.append(extra_config) spec.files = fake_factory.create('ns0:VirtualMachineFileInfo') spec.files.vmPathName = '[fake-datastore]' spec.managedBy = fake_factory.create('ns0:ManagedByInfo') spec.managedBy.extensionKey = 'org.openstack.compute' spec.managedBy.type = 'instance' spec.tools = fake_factory.create('ns0:ToolsConfigInfo') spec.tools.afterPowerOn = True spec.tools.afterResume = True spec.tools.beforeGuestReboot = True spec.tools.beforeGuestShutdown = True spec.tools.beforeGuestStandby = True return spec def test_get_vm_extra_config_spec(self): fake_factory = fake.FakeFactory() extra_opts = {mock.sentinel.key: mock.sentinel.value} res = vm_util.get_vm_extra_config_spec(fake_factory, extra_opts) self.assertEqual(1, len(res.extraConfig)) self.assertEqual(mock.sentinel.key, res.extraConfig[0].key) self.assertEqual(mock.sentinel.value, res.extraConfig[0].value) def test_get_vm_create_spec(self): extra_specs = vm_util.ExtraSpecs() fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = self._create_vm_config_spec() self.assertEqual(expected, result) expected.version = None expected.memoryMB = 2048 expected.guestId = constants.DEFAULT_OS_TYPE expected.extraConfig = [] def test_get_vm_create_spec_with_serial_port(self): extra_specs = vm_util.ExtraSpecs() fake_factory = fake.FakeFactory() self.flags(serial_port_service_uri='foobar', group='vmware') self.flags(serial_port_proxy_uri='telnet://example.com:31337', group='vmware') result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) 
serial_port_spec = vm_util.create_serial_port_spec(fake_factory) expected = self._create_vm_config_spec() expected.deviceChange = [serial_port_spec] self.assertEqual(expected, result) def test_get_vm_create_spec_with_allocations(self): cpu_limits = vm_util.Limits(limit=7, reservation=6) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] expected.guestId = constants.DEFAULT_OS_TYPE expected.instanceUuid = self._instance.uuid expected.memoryMB = self._instance.memory_mb expected.name = self._instance.uuid expected.numCPUs = self._instance.vcpus expected.version = None expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.limit = 7 cpu_allocation.reservation = 6 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'normal' cpu_allocation.shares.shares = 0 expected.cpuAllocation = cpu_allocation expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.key = 'nvp.vm-uuid' extra_config.value = self._instance.uuid expected.extraConfig.append(extra_config) self.assertEqual(expected, result) def test_get_vm_create_spec_with_limit(self): cpu_limits = vm_util.Limits(limit=7) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.instanceUuid = self._instance.uuid expected.name = self._instance.uuid expected.deviceChange = [] expected.extraConfig = [] extra_config = fake_factory.create("ns0:OptionValue") extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' expected.extraConfig.append(extra_config) expected.memoryMB = 2048 expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.version = None expected.guestId = constants.DEFAULT_OS_TYPE expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.limit = 7 cpu_allocation.reservation = 0 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'normal' cpu_allocation.shares.shares = 0 expected.cpuAllocation = cpu_allocation expected.numCPUs = 2 self.assertEqual(expected, result) def test_get_vm_create_spec_with_share(self): cpu_limits = 
vm_util.Limits(shares_level='high') extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.instanceUuid = self._instance.uuid expected.name = self._instance.uuid expected.deviceChange = [] expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' expected.extraConfig.append(extra_config) expected.memoryMB = 2048 expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.type = 'instance' expected.managedBy.extensionKey = 'org.openstack.compute' expected.version = None expected.guestId = constants.DEFAULT_OS_TYPE expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.beforeGuestStandby = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.afterResume = True expected.tools.afterPowerOn = True cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.reservation = 0 cpu_allocation.limit = -1 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'high' cpu_allocation.shares.shares = 0 expected.cpuAllocation = cpu_allocation expected.numCPUs = 2 self.assertEqual(expected, result) def test_get_vm_create_spec_with_share_custom(self): cpu_limits = vm_util.Limits(shares_level='custom', shares_share=1948) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.instanceUuid = self._instance.uuid expected.name = self._instance.uuid expected.deviceChange = [] expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.key = 'nvp.vm-uuid' extra_config.value = self._instance.uuid expected.extraConfig.append(extra_config) expected.memoryMB = 2048 expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.version = None expected.guestId = constants.DEFAULT_OS_TYPE expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.beforeGuestStandby = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.afterResume = True expected.tools.afterPowerOn = True cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.reservation = 0 cpu_allocation.limit = -1 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'custom' cpu_allocation.shares.shares = 1948 expected.cpuAllocation = cpu_allocation expected.numCPUs = 2 self.assertEqual(expected, result) def test_get_vm_create_spec_with_metadata(self): extra_specs = vm_util.ExtraSpecs() fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs, metadata='fake-metadata') expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.name = 
self._instance.uuid expected.instanceUuid = self._instance.uuid expected.deviceChange = [] expected.numCPUs = 2 expected.version = None expected.memoryMB = 2048 expected.guestId = 'otherGuest' expected.annotation = 'fake-metadata' expected.extraConfig = [] extra_config = fake_factory.create("ns0:OptionValue") extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' expected.extraConfig.append(extra_config) expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True self.assertEqual(expected, result) def test_create_vm(self): def fake_call_method(module, method, *args, **kwargs): if (method == 'CreateVM_Task'): return 'fake_create_vm_task' else: self.fail('Should not get here....') def fake_wait_for_task(self, *args): task_info = mock.Mock(state="success", result="fake_vm_ref") return task_info session = fake.FakeSession() fake_call_mock = mock.Mock(side_effect=fake_call_method) fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task) with test.nested( mock.patch.object(session, '_wait_for_task', fake_wait_mock), mock.patch.object(session, '_call_method', fake_call_mock) ) as (wait_for_task, call_method): vm_ref = vm_util.create_vm( session, self._instance, 'fake_vm_folder', 'fake_config_spec', 'fake_res_pool_ref') self.assertEqual('fake_vm_ref', vm_ref) call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task', 'fake_vm_folder', config='fake_config_spec', pool='fake_res_pool_ref') wait_for_task.assert_called_once_with('fake_create_vm_task') @mock.patch.object(vm_util.LOG, 'warning') def test_create_vm_invalid_guestid(self, mock_log_warn): """Ensure we warn when create_vm() fails after we passed an unrecognised guestId """ found = [False] def fake_log_warn(msg, values): if not isinstance(values, dict): return if values.get('ostype') == 'invalid_os_type': found[0] = True mock_log_warn.side_effect = fake_log_warn session = driver.VMwareAPISession() config_spec = vm_util.get_vm_create_spec( session.vim.client.factory, self._instance, 'fake-datastore', [], vm_util.ExtraSpecs(), os_type='invalid_os_type') self.assertRaises(vexc.VMwareDriverException, vm_util.create_vm, session, self._instance, 'folder', config_spec, 'res-pool') self.assertTrue(found[0]) def test_convert_vif_model(self): expected = "VirtualE1000" result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000) self.assertEqual(expected, result) expected = "VirtualE1000e" result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E) self.assertEqual(expected, result) types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualVmxnet3"] for type in types: self.assertEqual(type, vm_util.convert_vif_model(type)) self.assertRaises(exception.Invalid, vm_util.convert_vif_model, "InvalidVifModel") def test_power_on_instance_with_vm_ref(self): session = fake.FakeSession() with test.nested( mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_call_method, fake_wait_for_task): vm_util.power_on_instance(session, self._instance, vm_ref='fake-vm-ref') 
fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_power_on_instance_without_vm_ref(self): session = fake.FakeSession() with test.nested( mock.patch.object(vm_util, "get_vm_ref", return_value='fake-vm-ref'), mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task): vm_util.power_on_instance(session, self._instance) fake_get_vm_ref.assert_called_once_with(session, self._instance) fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_power_on_instance_with_exception(self): session = fake.FakeSession() with test.nested( mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task", side_effect=exception.NovaException('fake')), ) as (fake_call_method, fake_wait_for_task): self.assertRaises(exception.NovaException, vm_util.power_on_instance, session, self._instance, vm_ref='fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_power_on_instance_with_power_state_exception(self): session = fake.FakeSession() with test.nested( mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object( session, "_wait_for_task", side_effect=vexc.InvalidPowerStateException), ) as (fake_call_method, fake_wait_for_task): vm_util.power_on_instance(session, self._instance, vm_ref='fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_create_virtual_disk(self): session = fake.FakeSession() dm = session.vim.service_content.virtualDiskManager with test.nested( mock.patch.object(vm_util, "get_vmdk_create_spec", return_value='fake-spec'), mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_get_spec, fake_call_method, fake_wait_for_task): vm_util.create_virtual_disk(session, 'fake-dc-ref', 'fake-adapter-type', 'fake-disk-type', 'fake-path', 7) fake_get_spec.assert_called_once_with( session.vim.client.factory, 7, 'fake-adapter-type', 'fake-disk-type') fake_call_method.assert_called_once_with( session.vim, "CreateVirtualDisk_Task", dm, name='fake-path', datacenter='fake-dc-ref', spec='fake-spec') fake_wait_for_task.assert_called_once_with('fake-task') def test_copy_virtual_disk(self): session = fake.FakeSession() dm = session.vim.service_content.virtualDiskManager with test.nested( mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_call_method, fake_wait_for_task): vm_util.copy_virtual_disk(session, 'fake-dc-ref', 'fake-source', 'fake-dest') fake_call_method.assert_called_once_with( session.vim, "CopyVirtualDisk_Task", dm, sourceName='fake-source', sourceDatacenter='fake-dc-ref', destName='fake-dest') fake_wait_for_task.assert_called_once_with('fake-task') def _create_fake_vm_objects(self): fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.VirtualMachine()) return fake_objects def test_reconfigure_vm(self): session = fake.FakeSession() with test.nested( mock.patch.object(session, '_call_method', return_value='fake_reconfigure_task'), 
mock.patch.object(session, '_wait_for_task') ) as (_call_method, _wait_for_task): vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec') _call_method.assert_called_once_with(mock.ANY, 'ReconfigVM_Task', 'fake-ref', spec='fake-spec') _wait_for_task.assert_called_once_with( 'fake_reconfigure_task') def _get_network_attach_config_spec_opaque(self, network_ref, vc6_onwards=False): vif_info = {'network_name': 'fake-name', 'mac_address': '00:00:00:ca:fe:01', 'network_ref': network_ref, 'iface_id': 7, 'vif_model': 'VirtualE1000'} fake_factory = fake.FakeFactory() result = vm_util.get_network_attach_config_spec( fake_factory, vif_info, 1) card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo' expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = vif_info['iface_id'] extra_config.key = 'nvp.iface-id.1' expected.extraConfig.append(extra_config) expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = 'add' device = fake_factory.create('ns0:VirtualE1000') device.macAddress = vif_info['mac_address'] if network_ref['use-external-id']: if vc6_onwards: device.externalId = vif_info['iface_id'] else: dp = fake_factory.create('ns0:DynamicProperty') dp.name = '__externalId__' dp.val = vif_info['iface_id'] device.dynamicProperty = [dp] device.addressType = 'manual' connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo') connectable.allowGuestControl = True connectable.startConnected = True connectable.connected = True device.connectable = connectable backing = fake_factory.create(card) backing.opaqueNetworkType = vif_info['network_ref']['network-type'] backing.opaqueNetworkId = vif_info['network_ref']['network-id'] device.backing = backing device.key = -47 device.wakeOnLanEnabled = True device_change.device = device expected.deviceChange.append(device_change) self.assertEqual(expected, result) def test_get_network_attach_config_spec_opaque_integration_bridge(self): network_ref = {'type': 'OpaqueNetwork', 'network-id': 'fake-network-id', 'network-type': 'opaque', 'use-external-id': False} self._get_network_attach_config_spec_opaque(network_ref) def test_get_network_attach_config_spec_opaque(self): network_ref = {'type': 'OpaqueNetwork', 'network-id': 'fake-network-id', 'network-type': 'nsx.LogicalSwitch', 'use-external-id': True} self._get_network_attach_config_spec_opaque(network_ref) @mock.patch.object(fake, 'DataObject') def test_get_network_attach_config_spec_opaque_vc6_onwards(self, mock_object): # Add new attribute externalId supported from VC6 class FakeVirtualE1000(fake.DataObject): def __init__(self): super(FakeVirtualE1000, self).__init__() self.externalId = None mock_object.return_value = FakeVirtualE1000 network_ref = {'type': 'OpaqueNetwork', 'network-id': 'fake-network-id', 'network-type': 'nsx.LogicalSwitch', 'use-external-id': True} self._get_network_attach_config_spec_opaque(network_ref, vc6_onwards=True) def test_get_network_attach_config_spec_dvs(self): vif_info = {'network_name': 'br100', 'mac_address': '00:00:00:ca:fe:01', 'network_ref': {'type': 'DistributedVirtualPortgroup', 'dvsw': 'fake-network-id', 'dvpg': 'fake-group'}, 'iface_id': 7, 'vif_model': 'VirtualE1000'} fake_factory = fake.FakeFactory() result = vm_util.get_network_attach_config_spec( fake_factory, vif_info, 1) port = 'ns0:DistributedVirtualSwitchPortConnection' backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo' expected = 
fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = vif_info['iface_id'] extra_config.key = 'nvp.iface-id.1' expected.extraConfig.append(extra_config) expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = 'add' device = fake_factory.create('ns0:VirtualE1000') device.macAddress = vif_info['mac_address'] device.key = -47 device.addressType = 'manual' device.wakeOnLanEnabled = True device.backing = fake_factory.create(backing) device.backing.port = fake_factory.create(port) device.backing.port.portgroupKey = vif_info['network_ref']['dvpg'] device.backing.port.switchUuid = vif_info['network_ref']['dvsw'] connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo') connectable.allowGuestControl = True connectable.connected = True connectable.startConnected = True device.connectable = connectable device_change.device = device expected.deviceChange.append(device_change) self.assertEqual(expected, result) def _get_create_vif_spec(self, fake_factory, vif_info): limits = vm_util.Limits() limits.limit = 10 limits.reservation = 20 limits.shares_level = 'custom' limits.shares_share = 40 return vm_util._create_vif_spec(fake_factory, vif_info, limits) def _construct_vif_spec(self, fake_factory, vif_info): port = 'ns0:DistributedVirtualSwitchPortConnection' backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo' device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = 'add' device = fake_factory.create('ns0:VirtualE1000') device.macAddress = vif_info['mac_address'] device.key = -47 device.addressType = 'manual' device.wakeOnLanEnabled = True device.backing = fake_factory.create(backing) device.backing.port = fake_factory.create(port) device.backing.port.portgroupKey = vif_info['network_ref']['dvpg'] device.backing.port.switchUuid = vif_info['network_ref']['dvsw'] if vif_info['network_ref'].get('dvs_port_key'): device.backing.port.portKey = ( vif_info['network_ref']['dvs_port_key']) device.resourceAllocation = fake_factory.create( 'ns0:VirtualEthernetCardResourceAllocation') device.resourceAllocation.limit = 10 device.resourceAllocation.reservation = 20 device.resourceAllocation.share = fake_factory.create( 'ns0:SharesInfo') device.resourceAllocation.share.level = 'custom' device.resourceAllocation.share.shares = 40 connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo') connectable.allowGuestControl = True connectable.connected = True connectable.startConnected = True device.connectable = connectable device_change.device = device return device_change def test_get_create_vif_spec(self): vif_info = {'network_name': 'br100', 'mac_address': '00:00:00:ca:fe:01', 'network_ref': {'type': 'DistributedVirtualPortgroup', 'dvsw': 'fake-network-id', 'dvpg': 'fake-group'}, 'iface_id': 7, 'vif_model': 'VirtualE1000'} fake_factory = fake.FakeFactory() result = self._get_create_vif_spec(fake_factory, vif_info) device_change = self._construct_vif_spec(fake_factory, vif_info) self.assertEqual(device_change, result) def test_get_create_vif_spec_dvs_port_key(self): vif_info = {'network_name': 'br100', 'mac_address': '00:00:00:ca:fe:01', 'network_ref': {'type': 'DistributedVirtualPortgroup', 'dvsw': 'fake-network-id', 'dvpg': 'fake-group', 'dvs_port_key': 'fake-key'}, 'iface_id': 7, 'vif_model': 'VirtualE1000'} fake_factory = fake.FakeFactory() result = self._get_create_vif_spec(fake_factory, 
vif_info) device_change = self._construct_vif_spec(fake_factory, vif_info) self.assertEqual(device_change, result) def test_get_network_detach_config_spec(self): fake_factory = fake.FakeFactory() result = vm_util.get_network_detach_config_spec( fake_factory, 'fake-device', 2) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = 'free' extra_config.key = 'nvp.iface-id.2' expected.extraConfig.append(extra_config) expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.device = 'fake-device' device_change.operation = 'remove' expected.deviceChange.append(device_change) self.assertEqual(expected, result) @mock.patch.object(vm_util, "get_vm_ref") def test_power_off_instance(self, fake_get_ref): session = fake.FakeSession() with test.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object(session, '_wait_for_task') ) as (fake_call_method, fake_wait_for_task): vm_util.power_off_instance(session, self._instance, 'fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') self.assertFalse(fake_get_ref.called) @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref") def test_power_off_instance_no_vm_ref(self, fake_get_ref): session = fake.FakeSession() with test.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object(session, '_wait_for_task') ) as (fake_call_method, fake_wait_for_task): vm_util.power_off_instance(session, self._instance) fake_get_ref.assert_called_once_with(session, self._instance) fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') @mock.patch.object(vm_util, "get_vm_ref") def test_power_off_instance_with_exception(self, fake_get_ref): session = fake.FakeSession() with test.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object(session, '_wait_for_task', side_effect=exception.NovaException('fake')) ) as (fake_call_method, fake_wait_for_task): self.assertRaises(exception.NovaException, vm_util.power_off_instance, session, self._instance, 'fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') self.assertFalse(fake_get_ref.called) @mock.patch.object(vm_util, "get_vm_ref") def test_power_off_instance_power_state_exception(self, fake_get_ref): session = fake.FakeSession() with test.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object( session, '_wait_for_task', side_effect=vexc.InvalidPowerStateException) ) as (fake_call_method, fake_wait_for_task): vm_util.power_off_instance(session, self._instance, 'fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') self.assertFalse(fake_get_ref.called) def test_get_vm_create_spec_updated_hw_version(self): extra_specs = vm_util.ExtraSpecs(hw_version='vmx-08') result = vm_util.get_vm_create_spec(fake.FakeFactory(), self._instance, 'fake-datastore', [], extra_specs=extra_specs) self.assertEqual('vmx-08', result.version) def test_vm_create_spec_with_profile_spec(self): datastore = ds_obj.Datastore('fake-ds-ref', 
'fake-ds-name') extra_specs = vm_util.ExtraSpecs() create_spec = vm_util.get_vm_create_spec(fake.FakeFactory(), self._instance, datastore.name, [], extra_specs, profile_spec='fake_profile_spec') self.assertEqual(['fake_profile_spec'], create_spec.vmProfile) @mock.patch.object(pbm, 'get_profile_id_by_name') def test_get_storage_profile_spec(self, mock_retrieve_profile_id): fake_profile_id = fake.DataObject() fake_profile_id.uniqueId = 'fake_unique_id' mock_retrieve_profile_id.return_value = fake_profile_id profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(), 'fake_policy') self.assertEqual('ns0:VirtualMachineDefinedProfileSpec', profile_spec.obj_name) self.assertEqual(fake_profile_id.uniqueId, profile_spec.profileId) @mock.patch.object(pbm, 'get_profile_id_by_name') def test_storage_spec_empty_profile(self, mock_retrieve_profile_id): mock_retrieve_profile_id.return_value = None profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(), 'fake_policy') self.assertIsNone(profile_spec) def test_get_ephemeral_name(self): filename = vm_util.get_ephemeral_name(0) self.assertEqual('ephemeral_0.vmdk', filename) def test_detach_and_delete_devices_config_spec(self): fake_devices = ['device1', 'device2'] fake_factory = fake.FakeFactory() result = vm_util._detach_and_delete_devices_config_spec(fake_factory, fake_devices) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] device1 = fake_factory.create('ns0:VirtualDeviceConfigSpec') device1.device = 'device1' device1.operation = 'remove' device1.fileOperation = 'destroy' expected.deviceChange.append(device1) device2 = fake_factory.create('ns0:VirtualDeviceConfigSpec') device2.device = 'device2' device2.operation = 'remove' device2.fileOperation = 'destroy' expected.deviceChange.append(device2) self.assertEqual(expected, result) @mock.patch.object(vm_util, 'reconfigure_vm') def test_detach_devices_from_vm(self, mock_reconfigure): fake_devices = ['device1', 'device2'] session = fake.FakeSession() vm_util.detach_devices_from_vm(session, 'fake-ref', fake_devices) mock_reconfigure.assert_called_once_with(session, 'fake-ref', mock.ANY) def test_get_vm_boot_spec(self): disk = fake.VirtualDisk() disk.key = 7 fake_factory = fake.FakeFactory() result = vm_util.get_vm_boot_spec(fake_factory, disk) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') boot_disk = fake_factory.create( 'ns0:VirtualMachineBootOptionsBootableDiskDevice') boot_disk.deviceKey = disk.key boot_options = fake_factory.create('ns0:VirtualMachineBootOptions') boot_options.bootOrder = [boot_disk] expected.bootOptions = boot_options self.assertEqual(expected, result) def _get_devices(self, filename): devices = fake._create_array_of_type('VirtualDevice') devices.VirtualDevice = self._vmdk_path_and_adapter_type_devices( filename) return devices def test_find_rescue_device(self): filename = '[test_datastore] uuid/uuid-rescue.vmdk' devices = self._get_devices(filename) device = vm_util.find_rescue_device(devices, self._instance) self.assertEqual(filename, device.backing.fileName) def test_find_rescue_device_not_found(self): filename = '[test_datastore] uuid/uuid.vmdk' devices = self._get_devices(filename) self.assertRaises(exception.NotFound, vm_util.find_rescue_device, devices, self._instance) def test_validate_limits(self): limits = vm_util.Limits(shares_level='high', shares_share=1948) self.assertRaises(exception.InvalidInput, limits.validate) limits = vm_util.Limits(shares_level='fira') 
self.assertRaises(exception.InvalidInput, limits.validate) def test_get_vm_create_spec_with_console_delay(self): extra_specs = vm_util.ExtraSpecs() self.flags(console_delay_seconds=2, group='vmware') fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.name = self._instance.uuid expected.instanceUuid = self._instance.uuid expected.deviceChange = [] expected.numCPUs = 2 expected.version = None expected.memoryMB = 2048 expected.guestId = constants.DEFAULT_OS_TYPE expected.extraConfig = [] extra_config = fake_factory.create("ns0:OptionValue") extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' expected.extraConfig.append(extra_config) extra_config = fake_factory.create("ns0:OptionValue") extra_config.value = 2000000 extra_config.key = 'keyboard.typematicMinDelay' expected.extraConfig.append(extra_config) expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True self.assertEqual(expected, result) def test_get_vm_create_spec_with_cores_per_socket(self): extra_specs = vm_util.ExtraSpecs(cores_per_socket=4) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] expected.guestId = 'otherGuest' expected.instanceUuid = self._instance.uuid expected.memoryMB = self._instance.memory_mb expected.name = self._instance.uuid expected.numCPUs = self._instance.vcpus expected.numCoresPerSocket = 4 expected.version = None expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.key = 'nvp.vm-uuid' extra_config.value = self._instance.uuid expected.extraConfig.append(extra_config) self.assertEqual(expected, result) def test_get_vm_create_spec_with_memory_allocations(self): memory_limits = vm_util.Limits(limit=7, reservation=6) extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] expected.guestId = 'otherGuest' expected.instanceUuid = self._instance.uuid expected.memoryMB = self._instance.memory_mb expected.name = self._instance.uuid expected.numCPUs = self._instance.vcpus expected.version = None expected.files = 
fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' memory_allocation = fake_factory.create('ns0:ResourceAllocationInfo') memory_allocation.limit = 7 memory_allocation.reservation = 6 memory_allocation.shares = fake_factory.create('ns0:SharesInfo') memory_allocation.shares.level = 'normal' memory_allocation.shares.shares = 0 expected.memoryAllocation = memory_allocation expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.key = 'nvp.vm-uuid' extra_config.value = self._instance.uuid expected.extraConfig.append(extra_config) self.assertEqual(expected, result) def test_get_swap(self): vm_ref = 'fake-vm-ref' # Root disk controller_key = 1000 root_disk = fake.VirtualDisk() root_disk.controllerKey = controller_key disk_backing = fake.VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = '[test_datastore] uuid/uuid.vmdk' root_disk.capacityInBytes = 1048576 root_disk.backing = disk_backing # Swap disk swap_disk = fake.VirtualDisk() swap_disk.controllerKey = controller_key disk_backing = fake.VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = "swap" swap_disk.capacityInBytes = 1024 swap_disk.backing = disk_backing devices = [root_disk, swap_disk] session = fake.FakeSession() with mock.patch.object(session, '_call_method', return_value=devices) as mock_call: device = vm_util.get_swap(session, vm_ref) mock_call.assert_called_once_with(mock.ANY, "get_object_property", vm_ref, "config.hardware.device") self.assertEqual(swap_disk, device) def test_create_folder_with_empty_vmfolder(self): """Test create_folder when the datacenter vmFolder is empty""" child_folder = mock.sentinel.child_folder session = fake.FakeSession() with mock.patch.object(session, '_call_method', side_effect=[None, child_folder]): parent_folder = mock.sentinel.parent_folder parent_folder.value = 'parent-ref' child_name = 'child_folder' ret = vm_util.create_folder(session, parent_folder, child_name) self.assertEqual(child_folder, ret) expected_calls = [mock.call(vutil, 'get_object_property', parent_folder, 'childEntity'), mock.call(session.vim, 'CreateFolder', parent_folder, name=child_name)] self.assertEqual(expected_calls, session._call_method.call_args_list) def test_create_folder_not_present(self): """Test create_folder when child not present.""" prop_val = mock.Mock() prop_val.ManagedObjectReference = [] child_folder = mock.sentinel.child_folder session = fake.FakeSession() with mock.patch.object(session, '_call_method', side_effect=[prop_val, child_folder]): child_name = 'child_folder' parent_folder = mock.sentinel.parent_folder parent_folder.value = 'parent-ref' ret = vm_util.create_folder(session, parent_folder, child_name) self.assertEqual(child_folder, ret) expected_invoke_api = [mock.call(vutil, 'get_object_property', parent_folder, 'childEntity'), mock.call(session.vim, 'CreateFolder', parent_folder, name=child_name)] self.assertEqual(expected_invoke_api, session._call_method.mock_calls) def test_create_folder_already_present(self): """Test create_folder when child already present.""" parent_folder = mock.sentinel.parent_folder 
child_name = 'child_folder' prop_val = mock.Mock() child_entity_1 = mock.Mock() child_entity_1._type = 'Folder' child_entity_1_name = 'SomeOtherName' child_entity_2 = mock.Mock() child_entity_2._type = 'Folder' child_entity_2_name = 'AnotherName' child_entity_3 = mock.Mock() child_entity_3._type = 'Folder' child_entity_3_name = child_name prop_val.ManagedObjectReference = [child_entity_1, child_entity_2, child_entity_3] session = fake.FakeSession() with mock.patch.object(session, '_call_method', side_effect=[prop_val, child_entity_1_name, child_entity_2_name, child_entity_3_name]): ret = vm_util.create_folder(session, parent_folder, child_name) self.assertEqual(child_entity_3, ret) expected_invoke_api = [mock.call(vutil, 'get_object_property', parent_folder, 'childEntity'), mock.call(vutil, 'get_object_property', child_entity_1, 'name'), mock.call(vutil, 'get_object_property', child_entity_2, 'name'), mock.call(vutil, 'get_object_property', child_entity_3, 'name')] self.assertEqual(expected_invoke_api, session._call_method.mock_calls) def test_create_folder_with_duplicate_name(self): parent_folder = mock.sentinel.parent_folder parent_folder.value = 'parent-ref' child_name = 'child_folder' prop_val_1 = mock.Mock() prop_val_1.ManagedObjectReference = [] child_entity_2 = mock.Mock() child_entity_2._type = 'Folder' prop_val_2 = mock.Mock() prop_val_2.ManagedObjectReference = [child_entity_2] child_entity_2_name = child_name details = {'object': 'folder-1'} duplicate_exception = vexc.DuplicateName(details=details) session = fake.FakeSession() with mock.patch.object(session, '_call_method', side_effect=[prop_val_1, duplicate_exception, prop_val_2, child_entity_2_name]): ret = vm_util.create_folder(session, parent_folder, child_name) self.assertEqual(child_entity_2._type, ret._type) expected_invoke_api = [mock.call(vutil, 'get_object_property', parent_folder, 'childEntity'), mock.call(session.vim, 'CreateFolder', parent_folder, name=child_name)] self.assertEqual(expected_invoke_api, session._call_method.mock_calls) def test_get_folder_does_not_exist(self): session = fake.FakeSession() with mock.patch.object(session, '_call_method', return_value=None): ret = vm_util._get_folder(session, 'fake-parent', 'fake-name') self.assertIsNone(ret) expected_invoke_api = [mock.call(vutil, 'get_object_property', 'fake-parent', 'childEntity')] self.assertEqual(expected_invoke_api, session._call_method.mock_calls) def test_get_folder_child_entry_not_folder(self): child_entity = mock.Mock() child_entity._type = 'NotFolder' prop_val = mock.Mock() prop_val.ManagedObjectReference = [child_entity] session = fake.FakeSession() with mock.patch.object(session, '_call_method', return_value=prop_val): ret = vm_util._get_folder(session, 'fake-parent', 'fake-name') self.assertIsNone(ret) expected_invoke_api = [mock.call(vutil, 'get_object_property', 'fake-parent', 'childEntity')] self.assertEqual(expected_invoke_api, session._call_method.mock_calls) def test_get_folder_child_entry_not_matched(self): child_entity = mock.Mock() child_entity._type = 'Folder' prop_val = mock.Mock() prop_val.ManagedObjectReference = [child_entity] session = fake.FakeSession() with mock.patch.object(session, '_call_method', side_effect=[prop_val, 'fake-1-name']): ret = vm_util._get_folder(session, 'fake-parent', 'fake-name') self.assertIsNone(ret) expected_invoke_api = [mock.call(vutil, 'get_object_property', 'fake-parent', 'childEntity'), mock.call(vutil, 'get_object_property', child_entity, 'name')] self.assertEqual(expected_invoke_api, 
session._call_method.mock_calls) def test_get_folder_child_entry_matched(self): child_entity = mock.Mock() child_entity._type = 'Folder' prop_val = mock.Mock() prop_val.ManagedObjectReference = [child_entity] session = fake.FakeSession() with mock.patch.object(session, '_call_method', side_effect=[prop_val, 'fake-name']): ret = vm_util._get_folder(session, 'fake-parent', 'fake-name') self.assertEqual(ret, child_entity) expected_invoke_api = [mock.call(vutil, 'get_object_property', 'fake-parent', 'childEntity'), mock.call(vutil, 'get_object_property', child_entity, 'name')] self.assertEqual(expected_invoke_api, session._call_method.mock_calls) def test_folder_path_ref_cache(self): path = 'OpenStack/Project (e2b86092bf064181ade43deb3188f8e4)' self.assertIsNone(vm_util.folder_ref_cache_get(path)) vm_util.folder_ref_cache_update(path, 'fake-ref') self.assertEqual('fake-ref', vm_util.folder_ref_cache_get(path)) def test_get_vm_name(self): uuid = uuidutils.generate_uuid() expected = uuid name = vm_util._get_vm_name(None, uuid) self.assertEqual(expected, name) display_name = 'fira' expected = 'fira (%s)' % uuid name = vm_util._get_vm_name(display_name, uuid) self.assertEqual(expected, name) display_name = 'X' * 255 expected = '%s (%s)' % ('X' * 41, uuid) name = vm_util._get_vm_name(display_name, uuid) self.assertEqual(expected, name) self.assertEqual(len(name), 80) @mock.patch.object(vm_util, '_get_vm_name', return_value='fake-name') def test_rename_vm(self, mock_get_name): session = fake.FakeSession() with test.nested( mock.patch.object(session, '_call_method', return_value='fake_rename_task'), mock.patch.object(session, '_wait_for_task') ) as (_call_method, _wait_for_task): vm_util.rename_vm(session, 'fake-ref', self._instance) _call_method.assert_called_once_with(mock.ANY, 'Rename_Task', 'fake-ref', newName='fake-name') _wait_for_task.assert_called_once_with( 'fake_rename_task') mock_get_name.assert_called_once_with(self._instance.display_name, self._instance.uuid) @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop) class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase): # N.B. Mocking on the class only mocks test_*(), but we need # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in # setUp causes object initialisation to fail. Not mocking in tests results # in vim calls not using FakeVim. 
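    # Illustrative sketch of the pattern the note above describes (the class
    # and attribute names below are hypothetical, not part of this suite): a
    # class-level mock.patch decorator only wraps methods whose names start
    # with patch.TEST_PREFIX ('test' by default), so a fixture method such as
    # setUp() must carry its own decorator to get the same patching:
    #
    #     @mock.patch.object(SomeDriver, 'attr', fake_value)
    #     class ExampleTestCase(test.NoDBTestCase):
    #         @mock.patch.object(SomeDriver, 'attr', fake_value)
    #         def setUp(self):
    #             super(ExampleTestCase, self).setUp()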
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop) def setUp(self): super(VMwareVMUtilGetHostRefTestCase, self).setUp() fake.reset() vm_util.vm_refs_cache_reset() self.session = driver.VMwareAPISession() # Create a fake VirtualMachine running on a known host self.host_ref = list(fake._db_content['HostSystem'].keys())[0] self.vm_ref = fake.create_vm(host_ref=self.host_ref) @mock.patch.object(vm_util, 'get_vm_ref') def test_get_host_ref_for_vm(self, mock_get_vm_ref): mock_get_vm_ref.return_value = self.vm_ref ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance') mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance') self.assertEqual(self.host_ref, ret) @mock.patch.object(vm_util, 'get_vm_ref') def test_get_host_name_for_vm(self, mock_get_vm_ref): mock_get_vm_ref.return_value = self.vm_ref host = fake._get_object(self.host_ref) ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance') mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance') self.assertEqual(host.name, ret) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_vif.py0000664000567000056710000003671613064447152024167 0ustar jenkinsjenkins00000000000000# Copyright 2013 Canonical Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_vmware import exceptions as vexc from oslo_vmware import vim_util from nova import exception from nova.network import model as network_model from nova import test from nova.tests.unit import matchers from nova.tests.unit import utils from nova.tests.unit.virt.vmwareapi import fake from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import network_util from nova.virt.vmwareapi import vif from nova.virt.vmwareapi import vm_util class VMwareVifTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVifTestCase, self).setUp() self.flags(vlan_interface='vmnet0', group='vmware') network = network_model.Network(id=0, bridge='fa0', label='fake', vlan=3, bridge_interface='eth0', injected=True) self._network = network self.vif = network_model.NetworkInfo([ network_model.VIF(id=None, address='DE:AD:BE:EF:00:00', network=network, type=None, devname=None, ovs_interfaceid=None, rxtx_cap=3) ])[0] self.session = fake.FakeSession() self.cluster = None def tearDown(self): super(VMwareVifTestCase, self).tearDown() def test_ensure_vlan_bridge(self): self.mox.StubOutWithMock(network_util, 'get_network_with_the_name') self.mox.StubOutWithMock(network_util, 'get_vswitch_for_vlan_interface') self.mox.StubOutWithMock(network_util, 'check_if_vlan_interface_exists') self.mox.StubOutWithMock(network_util, 'create_port_group') network_util.get_network_with_the_name(self.session, 'fa0', self.cluster).AndReturn(None) network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0', self.cluster).AndReturn('vmnet0') network_util.check_if_vlan_interface_exists(self.session, 'vmnet0', self.cluster).AndReturn(True) network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3, self.cluster) network_util.get_network_with_the_name(self.session, 'fa0', None) self.mox.ReplayAll() vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True) # FlatDHCP network mode without vlan - network doesn't exist with the host def test_ensure_vlan_bridge_without_vlan(self): self.mox.StubOutWithMock(network_util, 'get_network_with_the_name') self.mox.StubOutWithMock(network_util, 'get_vswitch_for_vlan_interface') self.mox.StubOutWithMock(network_util, 'check_if_vlan_interface_exists') self.mox.StubOutWithMock(network_util, 'create_port_group') network_util.get_network_with_the_name(self.session, 'fa0', self.cluster).AndReturn(None) network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0', self.cluster).AndReturn('vmnet0') network_util.check_if_vlan_interface_exists(self.session, 'vmnet0', self.cluster).AndReturn(True) network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0, self.cluster) network_util.get_network_with_the_name(self.session, 'fa0', None) self.mox.ReplayAll() vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False) # FlatDHCP network mode without vlan - network exists with the host # Get vswitch and check vlan interface should not be called def test_ensure_vlan_bridge_with_network(self): self.mox.StubOutWithMock(network_util, 'get_network_with_the_name') self.mox.StubOutWithMock(network_util, 'get_vswitch_for_vlan_interface') self.mox.StubOutWithMock(network_util, 'check_if_vlan_interface_exists') self.mox.StubOutWithMock(network_util, 'create_port_group') vm_network = {'name': 'VM Network', 'type': 'Network'} network_util.get_network_with_the_name(self.session, 'fa0', self.cluster).AndReturn(vm_network) self.mox.ReplayAll() vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False) # Flat network mode with DVS def 
test_ensure_vlan_bridge_with_existing_dvs(self): network_ref = {'dvpg': 'dvportgroup-2062', 'type': 'DistributedVirtualPortgroup'} self.mox.StubOutWithMock(network_util, 'get_network_with_the_name') self.mox.StubOutWithMock(network_util, 'get_vswitch_for_vlan_interface') self.mox.StubOutWithMock(network_util, 'check_if_vlan_interface_exists') self.mox.StubOutWithMock(network_util, 'create_port_group') network_util.get_network_with_the_name(self.session, 'fa0', self.cluster).AndReturn(network_ref) self.mox.ReplayAll() ref = vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False) self.assertThat(ref, matchers.DictMatches(network_ref)) def test_get_network_ref_flat_dhcp(self): self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge') vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster, create_vlan=False) self.mox.ReplayAll() vif.get_network_ref(self.session, self.cluster, self.vif, False) def test_get_network_ref_bridge(self): self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge') vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster, create_vlan=True) self.mox.ReplayAll() network = network_model.Network(id=0, bridge='fa0', label='fake', vlan=3, bridge_interface='eth0', injected=True, should_create_vlan=True) self.vif = network_model.NetworkInfo([ network_model.VIF(id=None, address='DE:AD:BE:EF:00:00', network=network, type=None, devname=None, ovs_interfaceid=None, rxtx_cap=3) ])[0] vif.get_network_ref(self.session, self.cluster, self.vif, False) def test_create_port_group_already_exists(self): def fake_call_method(module, method, *args, **kwargs): if method == 'AddPortGroup': raise vexc.AlreadyExistsException() with test.nested( mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'), mock.patch.object(vm_util, 'get_host_ref'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_add_vswitch, _get_host, _call_method): network_util.create_port_group(self.session, 'pg_name', 'vswitch_name', vlan_id=0, cluster=None) def test_create_port_group_exception(self): def fake_call_method(module, method, *args, **kwargs): if method == 'AddPortGroup': raise vexc.VMwareDriverException() with test.nested( mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'), mock.patch.object(vm_util, 'get_host_ref'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_add_vswitch, _get_host, _call_method): self.assertRaises(vexc.VMwareDriverException, network_util.create_port_group, self.session, 'pg_name', 'vswitch_name', vlan_id=0, cluster=None) def test_get_vif_info_none(self): vif_info = vif.get_vif_info('fake_session', 'fake_cluster', 'is_neutron', 'fake_model', None) self.assertEqual([], vif_info) def test_get_vif_info_empty_list(self): vif_info = vif.get_vif_info('fake_session', 'fake_cluster', 'is_neutron', 'fake_model', []) self.assertEqual([], vif_info) @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref') def test_get_vif_info(self, mock_get_network_ref): network_info = utils.get_test_network_info() vif_info = vif.get_vif_info('fake_session', 'fake_cluster', 'is_neutron', 'fake_model', network_info) expected = [{'iface_id': 'vif-xxx-yyy-zzz', 'mac_address': 'fake', 'network_name': 'fake', 'network_ref': 'fake_ref', 'vif_model': 'fake_model'}] self.assertEqual(expected, vif_info) @mock.patch.object(vif, '_check_ovs_supported_version') def test_get_neutron_network_ovs_integration_bridge(self, mock_check): self.flags(integration_bridge='fake-bridge-id', group='vmware') vif_info = network_model.NetworkInfo([ 
network_model.VIF(type=network_model.VIF_TYPE_OVS, address='DE:AD:BE:EF:00:00', network=self._network)] )[0] network_ref = vif._get_neutron_network('fake-session', 'fake-cluster', vif_info) expected_ref = {'type': 'OpaqueNetwork', 'network-id': 'fake-bridge-id', 'network-type': 'opaque', 'use-external-id': False} self.assertEqual(expected_ref, network_ref) mock_check.assert_called_once_with('fake-session') @mock.patch.object(vif, '_check_ovs_supported_version') def test_get_neutron_network_ovs(self, mock_check): vif_info = network_model.NetworkInfo([ network_model.VIF(type=network_model.VIF_TYPE_OVS, address='DE:AD:BE:EF:00:00', network=self._network)] )[0] network_ref = vif._get_neutron_network('fake-session', 'fake-cluster', vif_info) expected_ref = {'type': 'OpaqueNetwork', 'network-id': 0, 'network-type': 'nsx.LogicalSwitch', 'use-external-id': True} self.assertEqual(expected_ref, network_ref) mock_check.assert_called_once_with('fake-session') @mock.patch.object(network_util, 'get_network_with_the_name') def test_get_neutron_network_dvs(self, mock_network_name): fake_network_obj = {'type': 'DistributedVirtualPortgroup', 'dvpg': 'fake-key', 'dvsw': 'fake-props'} mock_network_name.return_value = fake_network_obj vif_info = network_model.NetworkInfo([ network_model.VIF(type=network_model.VIF_TYPE_DVS, address='DE:AD:BE:EF:00:00', network=self._network)] )[0] network_ref = vif._get_neutron_network('fake-session', 'fake-cluster', vif_info) mock_network_name.assert_called_once_with('fake-session', 'fa0', 'fake-cluster') self.assertEqual(fake_network_obj, network_ref) @mock.patch.object(network_util, 'get_network_with_the_name') def test_get_neutron_network_dvs_vif_details(self, mock_network_name): fake_network_obj = {'type': 'DistributedVirtualPortgroup', 'dvpg': 'pg1', 'dvsw': 'fake-props'} mock_network_name.return_value = fake_network_obj vif_info = network_model.NetworkInfo([ network_model.VIF(type=network_model.VIF_TYPE_DVS, details={'dvs_port_key': 'key1', 'dvs_port_group_name': 'pg1'}, address='DE:AD:BE:EF:00:00', network=self._network)])[0] network_ref = vif._get_neutron_network('fake-session', 'fake-cluster', vif_info) mock_network_name.assert_called_once_with('fake-session', 'pg1', 'fake-cluster') self.assertEqual(fake_network_obj, network_ref) @mock.patch.object(network_util, 'get_network_with_the_name', return_value=None) def test_get_neutron_network_dvs_no_match(self, mock_network_name): vif_info = network_model.NetworkInfo([ network_model.VIF(type=network_model.VIF_TYPE_DVS, address='DE:AD:BE:EF:00:00', network=self._network)] )[0] self.assertRaises(exception.NetworkNotFoundForBridge, vif._get_neutron_network, 'fake-session', 'fake-cluster', vif_info) def test_get_neutron_network_invalid_type(self): vif_info = network_model.NetworkInfo([ network_model.VIF(address='DE:AD:BE:EF:00:00', network=self._network)] )[0] self.assertRaises(exception.InvalidInput, vif._get_neutron_network, 'fake-session', 'fake-cluster', vif_info) @mock.patch.object(vif.LOG, 'warning') @mock.patch.object(vim_util, 'get_vc_version', return_value='5.0.0') def test_check_invalid_ovs_version(self, mock_version, mock_warning): vif._check_ovs_supported_version('fake_session') # assert that the min version is in a warning message expected_arg = {'version': constants.MIN_VC_OVS_VERSION} version_arg_found = False for call in mock_warning.call_args_list: if call[0][1] == expected_arg: version_arg_found = True break self.assertTrue(version_arg_found) 
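# Summary sketch of the network-ref mapping exercised by the
# _get_neutron_network tests above (drawn from the expected refs those tests
# assert; not an exhaustive description of the helper):
#
#     VIF_TYPE_OVS with CONF.vmware.integration_bridge set
#         -> {'type': 'OpaqueNetwork', 'network-id': <integration bridge>,
#             'network-type': 'opaque', 'use-external-id': False}
#     VIF_TYPE_OVS otherwise
#         -> {'type': 'OpaqueNetwork', 'network-id': <network id>,
#             'network-type': 'nsx.LogicalSwitch', 'use-external-id': True}
#     VIF_TYPE_DVS
#         -> the DistributedVirtualPortgroup returned by
#            network_util.get_network_with_the_name(), or
#            exception.NetworkNotFoundForBridge when no portgroup matches
#     any other VIF type
#         -> exception.InvalidInput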
nova-13.1.4/nova/tests/unit/virt/vmwareapi/fake.py0000664000567000056710000016414613064447152023251 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A fake VMware VI API implementation. """ import collections import sys from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import units from oslo_utils import uuidutils from oslo_vmware import exceptions as vexc from oslo_vmware.objects import datastore as ds_obj import six from nova import exception from nova.virt.vmwareapi import constants _CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine', 'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session', 'files', 'ClusterComputeResource', 'HostStorageSystem', 'Folder'] _FAKE_FILE_SIZE = 1024 _FAKE_VCENTER_UUID = '497c514c-ef5e-4e7f-8d93-ec921993b93a' _db_content = {} _array_types = {} _vim_map = {} LOG = logging.getLogger(__name__) def reset(): """Resets the db contents.""" cleanup() create_network() create_folder() create_host_network_system() create_host_storage_system() ds_ref1 = create_datastore('ds1', 1024, 500) create_host(ds_ref=ds_ref1) ds_ref2 = create_datastore('ds2', 1024, 500) create_host(ds_ref=ds_ref2) create_datacenter('dc1', ds_ref1) create_datacenter('dc2', ds_ref2) create_res_pool() create_cluster('test_cluster', ds_ref1) create_cluster('test_cluster2', ds_ref2) def cleanup(): """Clear the db contents.""" for c in _CLASSES: # We fake the datastore by keeping the file references as a list of # names in the db if c == 'files': _db_content[c] = [] else: _db_content[c] = {} def _create_object(table, table_obj): """Create an object in the db.""" _db_content.setdefault(table, {}) _db_content[table][table_obj.obj] = table_obj def _get_object(obj_ref): """Get object for the give reference.""" return _db_content[obj_ref.type][obj_ref] def _get_objects(obj_type): """Get objects of the type.""" lst_objs = FakeRetrieveResult() for key in _db_content[obj_type]: lst_objs.add_object(_db_content[obj_type][key]) return lst_objs def _convert_to_array_of_mor(mors): """Wraps the given array into a DataObject.""" array_of_mors = DataObject() array_of_mors.ManagedObjectReference = mors return array_of_mors def _convert_to_array_of_opt_val(optvals): """Wraps the given array into a DataObject.""" array_of_optv = DataObject() array_of_optv.OptionValue = optvals return array_of_optv def _create_array_of_type(t): """Returns an array to contain objects of type t.""" if t in _array_types: return _array_types[t]() array_type_name = 'ArrayOf%s' % t array_type = type(array_type_name, (DataObject,), {}) def __init__(self): super(array_type, self).__init__(array_type_name) setattr(self, t, []) setattr(array_type, '__init__', __init__) _array_types[t] = array_type return array_type() class FakeRetrieveResult(object): """Object to retrieve a 
ObjectContent list.""" def __init__(self, token=None): self.objects = [] if token is not None: self.token = token def add_object(self, object): self.objects.append(object) def _get_object_refs(obj_type): """Get object References of the type.""" lst_objs = [] for key in _db_content[obj_type]: lst_objs.append(key) return lst_objs def _update_object(table, table_obj): """Update objects of the type.""" _db_content[table][table_obj.obj] = table_obj class Prop(object): """Property Object base class.""" def __init__(self, name=None, val=None): self.name = name self.val = val class ManagedObjectReference(object): """A managed object reference is a remote identifier.""" def __init__(self, name="ManagedObject", value=None): super(ManagedObjectReference, self) # Managed Object Reference value attributes # typically have values like vm-123 or # host-232 and not UUID. self.value = value # Managed Object Reference type # attributes hold the name of the type # of the vCenter object the value # attribute is the identifier for self.type = name self._type = name class ObjectContent(object): """ObjectContent array holds dynamic properties.""" # This class is a *fake* of a class sent back to us by # SOAP. It has its own names. These names are decided # for us by the API we are *faking* here. def __init__(self, obj_ref, prop_list=None, missing_list=None): self.obj = obj_ref if not isinstance(prop_list, collections.Iterable): prop_list = [] if not isinstance(missing_list, collections.Iterable): missing_list = [] # propSet is the name your Python code will need to # use since this is the name that the API will use if prop_list: self.propSet = prop_list # missingSet is the name your python code will # need to use since this is the name that the # API we are talking to will use. if missing_list: self.missingSet = missing_list class ManagedObject(object): """Managed Object base class.""" _counter = 0 def __init__(self, mo_id_prefix="obj"): """Sets the obj property which acts as a reference to the object.""" object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix)) object.__setattr__(self, 'propSet', []) object.__setattr__(self, 'obj', ManagedObjectReference(self.__class__.__name__, self.mo_id)) def set(self, attr, val): """Sets an attribute value. Not using the __setattr__ directly for we want to set attributes of the type 'a.b.c' and using this function class we set the same. """ self.__setattr__(attr, val) def get(self, attr): """Gets an attribute. Used as an intermediary to get nested property like 'a.b.c' value. 
""" return self.__getattr__(attr) def delete(self, attr): """Deletes an attribute.""" self.propSet = filter(lambda elem: elem.name != attr, self.propSet) def __setattr__(self, attr, val): # TODO(hartsocks): this is adds unnecessary complexity to the class for prop in self.propSet: if prop.name == attr: prop.val = val return elem = Prop() elem.name = attr elem.val = val self.propSet.append(elem) def __getattr__(self, attr): # TODO(hartsocks): remove this # in a real ManagedObject you have to iterate the propSet # in a real ManagedObject, the propSet is a *set* not a list for elem in self.propSet: if elem.name == attr: return elem.val msg = "Property %(attr)s not set for the managed object %(name)s" raise exception.NovaException(msg % {'attr': attr, 'name': self.__class__.__name__}) def _generate_moid(self, prefix): """Generates a new Managed Object ID.""" self.__class__._counter += 1 return prefix + "-" + str(self.__class__._counter) def __repr__(self): return jsonutils.dumps({elem.name: elem.val for elem in self.propSet}) class DataObject(object): """Data object base class.""" def __init__(self, obj_name=None): if obj_name is None: obj_name = 'ns0:' + self.__class__.__name__ self.obj_name = obj_name def __repr__(self): return str(self.__dict__) def __eq__(self, other): return self.__dict__ == other.__dict__ class HostInternetScsiHba(DataObject): """iSCSI Host Bus Adapter.""" def __init__(self, iscsi_name=None): super(HostInternetScsiHba, self).__init__() self.device = 'vmhba33' self.key = 'key-vmhba33' self.iScsiName = iscsi_name class FileAlreadyExists(DataObject): """File already exists class.""" def __init__(self): super(FileAlreadyExists, self).__init__() self.__name__ = vexc.FILE_ALREADY_EXISTS class FileNotFound(DataObject): """File not found class.""" def __init__(self): super(FileNotFound, self).__init__() self.__name__ = vexc.FILE_NOT_FOUND class FileFault(DataObject): """File fault.""" def __init__(self): super(FileFault, self).__init__() self.__name__ = vexc.FILE_FAULT class CannotDeleteFile(DataObject): """Cannot delete file.""" def __init__(self): super(CannotDeleteFile, self).__init__() self.__name__ = vexc.CANNOT_DELETE_FILE class FileLocked(DataObject): """File locked.""" def __init__(self): super(FileLocked, self).__init__() self.__name__ = vexc.FILE_LOCKED class VirtualDisk(DataObject): """Virtual Disk class.""" def __init__(self, controllerKey=0, unitNumber=0): super(VirtualDisk, self).__init__() self.key = 0 self.controllerKey = controllerKey self.unitNumber = unitNumber class VirtualDiskFlatVer2BackingInfo(DataObject): """VirtualDiskFlatVer2BackingInfo class.""" def __init__(self): super(VirtualDiskFlatVer2BackingInfo, self).__init__() self.thinProvisioned = False self.eagerlyScrub = False class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject): """VirtualDiskRawDiskMappingVer1BackingInfo class.""" def __init__(self): super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__() self.lunUuid = "" class VirtualIDEController(DataObject): def __init__(self, key=0): self.key = key class VirtualLsiLogicController(DataObject): """VirtualLsiLogicController class.""" def __init__(self, key=0, scsiCtlrUnitNumber=0, busNumber=0): self.key = key self.busNumber = busNumber self.scsiCtlrUnitNumber = scsiCtlrUnitNumber self.device = [] class VirtualLsiLogicSASController(DataObject): """VirtualLsiLogicSASController class.""" pass class VirtualPCNet32(DataObject): """VirtualPCNet32 class.""" def __init__(self): super(VirtualPCNet32, self).__init__() self.key = 4000 class 
OptionValue(DataObject): """OptionValue class.""" def __init__(self, key=None, value=None): super(OptionValue, self).__init__() self.key = key self.value = value class VirtualMachine(ManagedObject): """Virtual Machine class.""" def __init__(self, **kwargs): super(VirtualMachine, self).__init__("vm") self.set("name", kwargs.get("name", 'test-vm')) self.set("runtime.connectionState", kwargs.get("conn_state", "connected")) self.set("summary.config.guestId", kwargs.get("guest", constants.DEFAULT_OS_TYPE)) ds_do = kwargs.get("ds", None) self.set("datastore", _convert_to_array_of_mor(ds_do)) self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus", "toolsOk")) self.set("summary.guest.toolsRunningStatus", kwargs.get( "toolsrunningstate", "guestToolsRunning")) self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn")) self.set("config.files.vmPathName", kwargs.get("vmPathName")) self.set("summary.config.numCpu", kwargs.get("numCpu", 1)) self.set("summary.config.memorySizeMB", kwargs.get("mem", 1)) self.set("summary.config.instanceUuid", kwargs.get("instanceUuid")) self.set("version", kwargs.get("version")) devices = _create_array_of_type('VirtualDevice') devices.VirtualDevice = kwargs.get("virtual_device", []) self.set("config.hardware.device", devices) exconfig_do = kwargs.get("extra_config", None) self.set("config.extraConfig", _convert_to_array_of_opt_val(exconfig_do)) if exconfig_do: for optval in exconfig_do: self.set('config.extraConfig["%s"]' % optval.key, optval) self.set('runtime.host', kwargs.get("runtime_host", None)) self.device = kwargs.get("virtual_device", []) # Sample of diagnostics data is below. config = [ ('template', False), ('vmPathName', 'fake_path'), ('memorySizeMB', 512), ('cpuReservation', 0), ('memoryReservation', 0), ('numCpu', 1), ('numEthernetCards', 1), ('numVirtualDisks', 1)] self.set("summary.config", config) quickStats = [ ('overallCpuUsage', 0), ('overallCpuDemand', 0), ('guestMemoryUsage', 0), ('hostMemoryUsage', 141), ('balloonedMemory', 0), ('consumedOverheadMemory', 20)] self.set("summary.quickStats", quickStats) key1 = {'key': 'cpuid.AES'} key2 = {'key': 'cpuid.AVX'} runtime = [ ('connectionState', 'connected'), ('powerState', 'poweredOn'), ('toolsInstallerMounted', False), ('suspendInterval', 0), ('memoryOverhead', 21417984), ('maxCpuUsage', 2000), ('featureRequirement', [key1, key2])] self.set("summary.runtime", runtime) def _update_extra_config(self, extra): extra_config = self.get("config.extraConfig") values = extra_config.OptionValue for value in values: if value.key == extra.key: value.value = extra.value return kv = DataObject() kv.key = extra.key kv.value = extra.value extra_config.OptionValue.append(kv) self.set("config.extraConfig", extra_config) extra_config = self.get("config.extraConfig") def reconfig(self, factory, val): """Called to reconfigure the VM. Actually customizes the property setting of the Virtual Machine object. 
""" if hasattr(val, 'name') and val.name: self.set("name", val.name) if hasattr(val, 'extraConfig'): extraConfigs = _merge_extraconfig( self.get("config.extraConfig").OptionValue, val.extraConfig) self.get("config.extraConfig").OptionValue = extraConfigs if hasattr(val, 'instanceUuid') and val.instanceUuid is not None: if val.instanceUuid == "": val.instanceUuid = uuidutils.generate_uuid() self.set("summary.config.instanceUuid", val.instanceUuid) try: if not hasattr(val, 'deviceChange'): return if hasattr(val, 'extraConfig'): # there are 2 cases - new entry or update an existing one for extra in val.extraConfig: self._update_extra_config(extra) if len(val.deviceChange) < 2: return # Case of Reconfig of VM to attach disk controller_key = val.deviceChange[0].device.controllerKey filename = val.deviceChange[0].device.backing.fileName disk = VirtualDisk() disk.controllerKey = controller_key disk_backing = VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = filename disk_backing.key = -101 disk.backing = disk_backing disk.capacityInBytes = 1024 disk.capacityInKB = 1 controller = VirtualLsiLogicController() controller.key = controller_key devices = _create_array_of_type('VirtualDevice') devices.VirtualDevice = [disk, controller, self.device[0]] self.set("config.hardware.device", devices) except AttributeError: pass class Folder(ManagedObject): """Folder class.""" def __init__(self): super(Folder, self).__init__("Folder") self.set("childEntity", []) class Network(ManagedObject): """Network class.""" def __init__(self): super(Network, self).__init__("network") self.set("summary.name", "vmnet0") class ResourcePool(ManagedObject): """Resource Pool class.""" def __init__(self, name="test_ResPool", value="resgroup-test"): super(ResourcePool, self).__init__("rp") self.set("name", name) summary = DataObject() runtime = DataObject() config = DataObject() memory = DataObject() cpu = DataObject() memoryAllocation = DataObject() cpuAllocation = DataObject() vm_list = DataObject() memory.maxUsage = 1000 * units.Mi memory.overallUsage = 500 * units.Mi cpu.maxUsage = 10000 cpu.overallUsage = 1000 runtime.cpu = cpu runtime.memory = memory summary.runtime = runtime cpuAllocation.limit = 10000 memoryAllocation.limit = 1024 memoryAllocation.reservation = 1024 config.memoryAllocation = memoryAllocation config.cpuAllocation = cpuAllocation vm_list.ManagedObjectReference = [] self.set("summary", summary) self.set("summary.runtime.memory", memory) self.set("config", config) self.set("vm", vm_list) parent = ManagedObjectReference(value=value, name=name) owner = ManagedObjectReference(value=value, name=name) self.set("parent", parent) self.set("owner", owner) class DatastoreHostMount(DataObject): def __init__(self, value='host-100'): super(DatastoreHostMount, self).__init__() host_ref = (_db_content["HostSystem"] [list(_db_content["HostSystem"].keys())[0]].obj) host_system = DataObject() host_system.ManagedObjectReference = [host_ref] host_system.value = value self.key = host_system class ClusterComputeResource(ManagedObject): """Cluster class.""" def __init__(self, name="test_cluster"): super(ClusterComputeResource, self).__init__("domain") self.set("name", name) self.set("host", None) self.set("datastore", None) self.set("resourcePool", None) summary = DataObject() summary.numHosts = 0 summary.numCpuCores = 0 summary.numCpuThreads = 0 summary.numEffectiveHosts = 0 summary.totalMemory = 0 summary.effectiveMemory = 0 summary.effectiveCpu = 10000 self.set("summary", summary) def _add_root_resource_pool(self, 
r_pool): if r_pool: self.set("resourcePool", r_pool) def _add_host(self, host_sys): if host_sys: hosts = self.get("host") if hosts is None: hosts = DataObject() hosts.ManagedObjectReference = [] self.set("host", hosts) hosts.ManagedObjectReference.append(host_sys) # Update summary every time a new host is added self._update_summary() def _add_datastore(self, datastore): if datastore: datastores = self.get("datastore") if datastores is None: datastores = DataObject() datastores.ManagedObjectReference = [] self.set("datastore", datastores) datastores.ManagedObjectReference.append(datastore) # Method to update summary of a cluster upon host addition def _update_summary(self): summary = self.get("summary") summary.numHosts = 0 summary.numCpuCores = 0 summary.numCpuThreads = 0 summary.numEffectiveHosts = 0 summary.totalMemory = 0 summary.effectiveMemory = 0 hosts = self.get("host") # Compute the aggregate stats summary.numHosts = len(hosts.ManagedObjectReference) for host_ref in hosts.ManagedObjectReference: host_sys = _get_object(host_ref) connected = host_sys.get("connected") host_summary = host_sys.get("summary") summary.numCpuCores += host_summary.hardware.numCpuCores summary.numCpuThreads += host_summary.hardware.numCpuThreads summary.totalMemory += host_summary.hardware.memorySize free_memory = (host_summary.hardware.memorySize / units.Mi - host_summary.quickStats.overallMemoryUsage) summary.effectiveMemory += free_memory if connected else 0 summary.numEffectiveHosts += 1 if connected else 0 self.set("summary", summary) class Datastore(ManagedObject): """Datastore class.""" def __init__(self, name="fake-ds", capacity=1024, free=500, accessible=True, maintenance_mode="normal"): super(Datastore, self).__init__("ds") self.set("summary.type", "VMFS") self.set("summary.name", name) self.set("summary.capacity", capacity * units.Gi) self.set("summary.freeSpace", free * units.Gi) self.set("summary.accessible", accessible) self.set("summary.maintenanceMode", maintenance_mode) self.set("browser", "") class HostNetworkSystem(ManagedObject): """HostNetworkSystem class.""" def __init__(self, name="networkSystem"): super(HostNetworkSystem, self).__init__("ns") self.set("name", name) pnic_do = DataObject() pnic_do.device = "vmnic0" net_info_pnic = DataObject() net_info_pnic.PhysicalNic = [pnic_do] self.set("networkInfo.pnic", net_info_pnic) class HostStorageSystem(ManagedObject): """HostStorageSystem class.""" def __init__(self): super(HostStorageSystem, self).__init__("storageSystem") class HostSystem(ManagedObject): """Host System class.""" def __init__(self, name="ha-host", connected=True, ds_ref=None, maintenance_mode=False): super(HostSystem, self).__init__("host") self.set("name", name) if _db_content.get("HostNetworkSystem", None) is None: create_host_network_system() if not _get_object_refs('HostStorageSystem'): create_host_storage_system() host_net_key = list(_db_content["HostNetworkSystem"].keys())[0] host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj self.set("configManager.networkSystem", host_net_sys) host_storage_sys_key = _get_object_refs('HostStorageSystem')[0] self.set("configManager.storageSystem", host_storage_sys_key) if not ds_ref: ds_ref = create_datastore('local-host-%s' % name, 500, 500) datastores = DataObject() datastores.ManagedObjectReference = [ds_ref] self.set("datastore", datastores) summary = DataObject() hardware = DataObject() hardware.numCpuCores = 8 hardware.numCpuPkgs = 2 hardware.numCpuThreads = 16 hardware.vendor = "Intel" hardware.cpuModel = 
"Intel(R) Xeon(R)" hardware.uuid = "host-uuid" hardware.memorySize = units.Gi summary.hardware = hardware runtime = DataObject() if connected: runtime.connectionState = "connected" else: runtime.connectionState = "disconnected" runtime.inMaintenanceMode = maintenance_mode summary.runtime = runtime quickstats = DataObject() quickstats.overallMemoryUsage = 500 summary.quickStats = quickstats product = DataObject() product.name = "VMware ESXi" product.version = constants.MIN_VC_VERSION config = DataObject() config.product = product summary.config = config pnic_do = DataObject() pnic_do.device = "vmnic0" net_info_pnic = DataObject() net_info_pnic.PhysicalNic = [pnic_do] self.set("summary", summary) self.set("capability.maxHostSupportedVcpus", 600) self.set("summary.hardware", hardware) self.set("summary.runtime", runtime) self.set("config.network.pnic", net_info_pnic) self.set("connected", connected) if _db_content.get("Network", None) is None: create_network() net_ref = _db_content["Network"][ list(_db_content["Network"].keys())[0]].obj network_do = DataObject() network_do.ManagedObjectReference = [net_ref] self.set("network", network_do) vswitch_do = DataObject() vswitch_do.pnic = ["vmnic0"] vswitch_do.name = "vSwitch0" vswitch_do.portgroup = ["PortGroup-vmnet0"] net_swicth = DataObject() net_swicth.HostVirtualSwitch = [vswitch_do] self.set("config.network.vswitch", net_swicth) host_pg_do = DataObject() host_pg_do.key = "PortGroup-vmnet0" pg_spec = DataObject() pg_spec.vlanId = 0 pg_spec.name = "vmnet0" host_pg_do.spec = pg_spec host_pg = DataObject() host_pg.HostPortGroup = [host_pg_do] self.set("config.network.portgroup", host_pg) config = DataObject() storageDevice = DataObject() iscsi_hba = HostInternetScsiHba() iscsi_hba.iScsiName = "iscsi-name" host_bus_adapter_array = DataObject() host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba] storageDevice.hostBusAdapter = host_bus_adapter_array config.storageDevice = storageDevice self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array) # Set the same on the storage system managed object host_storage_sys = _get_object(host_storage_sys_key) host_storage_sys.set('storageDeviceInfo.hostBusAdapter', host_bus_adapter_array) def _add_iscsi_target(self, data): default_lun = DataObject() default_lun.scsiLun = 'key-vim.host.ScsiDisk-010' default_lun.key = 'key-vim.host.ScsiDisk-010' default_lun.deviceName = 'fake-device' default_lun.uuid = 'fake-uuid' scsi_lun_array = DataObject() scsi_lun_array.ScsiLun = [default_lun] self.set("config.storageDevice.scsiLun", scsi_lun_array) transport = DataObject() transport.address = [data['target_portal']] transport.iScsiName = data['target_iqn'] default_target = DataObject() default_target.lun = [default_lun] default_target.transport = transport iscsi_adapter = DataObject() iscsi_adapter.adapter = 'key-vmhba33' iscsi_adapter.transport = transport iscsi_adapter.target = [default_target] iscsi_topology = DataObject() iscsi_topology.adapter = [iscsi_adapter] self.set("config.storageDevice.scsiTopology", iscsi_topology) def _add_port_group(self, spec): """Adds a port group to the host system object in the db.""" pg_name = spec.name vswitch_name = spec.vswitchName vlanid = spec.vlanId vswitch_do = DataObject() vswitch_do.pnic = ["vmnic0"] vswitch_do.name = vswitch_name vswitch_do.portgroup = ["PortGroup-%s" % pg_name] vswitches = self.get("config.network.vswitch").HostVirtualSwitch vswitches.append(vswitch_do) host_pg_do = DataObject() host_pg_do.key = "PortGroup-%s" % pg_name pg_spec = DataObject() 
pg_spec.vlanId = vlanid pg_spec.name = pg_name host_pg_do.spec = pg_spec host_pgrps = self.get("config.network.portgroup").HostPortGroup host_pgrps.append(host_pg_do) class Datacenter(ManagedObject): """Datacenter class.""" def __init__(self, name="ha-datacenter", ds_ref=None): super(Datacenter, self).__init__("dc") self.set("name", name) if _db_content.get("Folder", None) is None: create_folder() folder_ref = _db_content["Folder"][ list(_db_content["Folder"].keys())[0]].obj folder_do = DataObject() folder_do.ManagedObjectReference = [folder_ref] self.set("vmFolder", folder_ref) if _db_content.get("Network", None) is None: create_network() net_ref = _db_content["Network"][ list(_db_content["Network"].keys())[0]].obj network_do = DataObject() network_do.ManagedObjectReference = [net_ref] self.set("network", network_do) if ds_ref: datastore = DataObject() datastore.ManagedObjectReference = [ds_ref] else: datastore = None self.set("datastore", datastore) class Task(ManagedObject): """Task class.""" def __init__(self, task_name, state="running", result=None, error_fault=None): super(Task, self).__init__("Task") info = DataObject() info.name = task_name info.state = state if state == 'error': error = DataObject() error.localizedMessage = "Error message" if not error_fault: error.fault = DataObject() else: error.fault = error_fault info.error = error info.result = result self.set("info", info) def create_host_network_system(): host_net_system = HostNetworkSystem() _create_object("HostNetworkSystem", host_net_system) def create_host_storage_system(): host_storage_system = HostStorageSystem() _create_object("HostStorageSystem", host_storage_system) def create_host(ds_ref=None): host_system = HostSystem(ds_ref=ds_ref) _create_object('HostSystem', host_system) def create_datacenter(name, ds_ref=None): data_center = Datacenter(name, ds_ref) _create_object('Datacenter', data_center) def create_datastore(name, capacity, free): data_store = Datastore(name, capacity, free) _create_object('Datastore', data_store) return data_store.obj def create_res_pool(): res_pool = ResourcePool() _create_object('ResourcePool', res_pool) return res_pool.obj def create_folder(): folder = Folder() _create_object('Folder', folder) return folder.obj def create_network(): network = Network() _create_object('Network', network) def create_cluster(name, ds_ref): cluster = ClusterComputeResource(name=name) cluster._add_host(_get_object_refs("HostSystem")[0]) cluster._add_host(_get_object_refs("HostSystem")[1]) cluster._add_datastore(ds_ref) cluster._add_root_resource_pool(create_res_pool()) _create_object('ClusterComputeResource', cluster) return cluster def create_vm(uuid=None, name=None, cpus=1, memory=128, devices=None, vmPathName=None, extraConfig=None, res_pool_ref=None, host_ref=None, version=None): if uuid is None: uuid = uuidutils.generate_uuid() if name is None: name = uuid if devices is None: devices = [] if vmPathName is None: vm_path = ds_obj.DatastorePath( list(_db_content['Datastore'].values())[0]) else: vm_path = ds_obj.DatastorePath.parse(vmPathName) if res_pool_ref is None: res_pool_ref = list(_db_content['ResourcePool'].keys())[0] if host_ref is None: host_ref = list(_db_content["HostSystem"].keys())[0] # Fill in the default path to the vmx file if we were only given a # datastore. Note that if you create a VM with vmPathName '[foo]', when you # retrieve vmPathName it will be '[foo] uuid/uuid.vmx'. Hence we use # vm_path below for the stored value of vmPathName. 
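    # For example, assuming the fake inventory already contains a HostSystem
    # and a ResourcePool:
    #     vm_ref = create_vm(name='inst-1', vmPathName='[foo]')
    #     _get_object(vm_ref).get('config.files.vmPathName')
    # returns '[foo] inst-1/inst-1.vmx'; the datastore 'foo' is created on
    # demand below if it is not already registered.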
if vm_path.rel_path == '': vm_path = vm_path.join(name, name + '.vmx') for key, value in six.iteritems(_db_content["Datastore"]): if value.get('summary.name') == vm_path.datastore: ds = key break else: ds = create_datastore(vm_path.datastore, 1024, 500) vm_dict = {"name": name, "ds": [ds], "runtime_host": host_ref, "powerstate": "poweredOff", "vmPathName": str(vm_path), "numCpu": cpus, "mem": memory, "extra_config": extraConfig, "virtual_device": devices, "instanceUuid": uuid, "version": version} vm = VirtualMachine(**vm_dict) _create_object("VirtualMachine", vm) res_pool = _get_object(res_pool_ref) res_pool.vm.ManagedObjectReference.append(vm.obj) return vm.obj def create_task(task_name, state="running", result=None, error_fault=None): task = Task(task_name, state, result, error_fault) _create_object("Task", task) return task def _add_file(file_path): """Adds a file reference to the db.""" _db_content["files"].append(file_path) def _remove_file(file_path): """Removes a file reference from the db.""" # Check if the remove is for a single file object or for a folder if file_path.find(".vmdk") != -1: if file_path not in _db_content.get("files"): raise vexc.FileNotFoundException(file_path) _db_content.get("files").remove(file_path) else: # Removes the files in the folder and the folder too from the db to_delete = set() for file in _db_content.get("files"): if file.find(file_path) != -1: to_delete.add(file) for file in to_delete: _db_content.get("files").remove(file) def fake_plug_vifs(*args, **kwargs): """Fakes plugging vifs.""" pass def fake_get_network(*args, **kwargs): """Fake get network.""" return {'type': 'fake'} def assertPathExists(test, path): test.assertIn(path, _db_content.get('files')) def assertPathNotExists(test, path): test.assertNotIn(path, _db_content.get('files')) def get_file(file_path): """Check if file exists in the db.""" return file_path in _db_content.get("files") def fake_upload_image(context, image, instance, **kwargs): """Fakes the upload of an image.""" pass def fake_fetch_image(context, instance, host, port, dc_name, ds_name, file_path, cookies=None): """Fakes the fetch of an image.""" ds_file_path = "[" + ds_name + "] " + file_path _add_file(ds_file_path) def _get_vm_mdo(vm_ref): """Gets the Virtual Machine with the ref from the db.""" if _db_content.get("VirtualMachine", None) is None: raise exception.NotFound("There is no VM registered") if vm_ref not in _db_content.get("VirtualMachine"): raise exception.NotFound("Virtual Machine with ref %s is not " "there" % vm_ref) return _db_content.get("VirtualMachine")[vm_ref] def _merge_extraconfig(existing, changes): """Imposes the changes in extraConfig over the existing extraConfig.""" existing = existing or [] if (changes): for c in changes: if len([x for x in existing if x.key == c.key]) > 0: extraConf = [x for x in existing if x.key == c.key][0] extraConf.value = c.value else: existing.append(c) return existing class FakeFactory(object): """Fake factory class for the suds client.""" def create(self, obj_name): """Creates a namespace object.""" klass = obj_name[4:] # skip 'ns0:' module = sys.modules[__name__] fake_klass = getattr(module, klass, None) if fake_klass is None: return DataObject(obj_name) else: return fake_klass() class SharesInfo(DataObject): def __init__(self): super(SharesInfo, self).__init__() self.level = None self.shares = None class VirtualEthernetCardResourceAllocation(DataObject): def __init__(self): super(VirtualEthernetCardResourceAllocation, self).__init__() self.share = SharesInfo() class 
VirtualE1000(DataObject): def __init__(self): super(VirtualE1000, self).__init__() self.resourceAllocation = VirtualEthernetCardResourceAllocation() class FakeService(DataObject): """Fake service class.""" def Logout(self, session_manager): pass def FindExtension(self, extension_manager, key): return [] class FakeClient(DataObject): """Fake client class.""" def __init__(self): """Creates a namespace object.""" self.service = FakeService() class FakeSession(object): """Fake Session Class.""" def __init__(self): self.vim = FakeVim() def _call_method(self, module, method, *args, **kwargs): raise NotImplementedError() def _wait_for_task(self, task_ref): raise NotImplementedError() class FakeObjectRetrievalSession(FakeSession): """A session for faking object retrieval tasks. _call_method() returns a given set of objects sequentially, regardless of the method called. """ def __init__(self, *ret): super(FakeObjectRetrievalSession, self).__init__() self.ret = ret self.ind = 0 def _call_method(self, module, method, *args, **kwargs): if (method == 'continue_retrieval' or method == 'cancel_retrieval'): return # return fake objects in a circular manner self.ind = (self.ind + 1) % len(self.ret) return self.ret[self.ind - 1] def get_fake_vim_object(vmware_api_session): key = vmware_api_session.__repr__() if key not in _vim_map: _vim_map[key] = FakeVim() return _vim_map[key] class FakeVim(object): """Fake VIM Class.""" def __init__(self, protocol="https", host="localhost", trace=None): """Initializes the suds client object, sets the service content contents and the cookies for the session. """ self._session = None self.client = FakeClient() self.client.factory = FakeFactory() transport = DataObject() transport.cookiejar = "Fake-CookieJar" options = DataObject() options.transport = transport self.client.options = options service_content = self.client.factory.create('ns0:ServiceContent') service_content.propertyCollector = "PropCollector" service_content.virtualDiskManager = "VirtualDiskManager" service_content.fileManager = "FileManager" service_content.rootFolder = "RootFolder" service_content.sessionManager = "SessionManager" service_content.extensionManager = "ExtensionManager" service_content.searchIndex = "SearchIndex" about_info = DataObject() about_info.name = "VMware vCenter Server" about_info.version = "5.1.0" about_info.instanceUuid = _FAKE_VCENTER_UUID service_content.about = about_info self._service_content = service_content @property def service_content(self): return self._service_content def __repr__(self): return "Fake VIM Object" def __str__(self): return "Fake VIM Object" def _login(self): """Logs in and sets the session object in the db.""" self._session = uuidutils.generate_uuid() session = DataObject() session.key = self._session session.userName = 'sessionUserName' _db_content['session'][self._session] = session return session def _terminate_session(self, *args, **kwargs): """Terminates a session.""" s = kwargs.get("sessionId")[0] if s not in _db_content['session']: return del _db_content['session'][s] def _check_session(self): """Checks if the session is active.""" if (self._session is None or self._session not in _db_content['session']): LOG.debug("Session is faulty") raise vexc.VimFaultException([vexc.NOT_AUTHENTICATED], "Session Invalid") def _session_is_active(self, *args, **kwargs): try: self._check_session() return True except Exception: return False def _create_vm(self, method, *args, **kwargs): """Creates and registers a VM object with the Host System.""" config_spec = 
kwargs.get("config") if config_spec.guestId not in constants.VALID_OS_TYPES: ex = vexc.VMwareDriverException('A specified parameter was ' 'not correct.') return create_task(method, "error", error_fault=ex).obj pool = kwargs.get('pool') version = getattr(config_spec, 'version', None) devices = [] for device_change in config_spec.deviceChange: if device_change.operation == 'add': devices.append(device_change.device) vm_ref = create_vm(config_spec.instanceUuid, config_spec.name, config_spec.numCPUs, config_spec.memoryMB, devices, config_spec.files.vmPathName, config_spec.extraConfig, pool, version=version) task_mdo = create_task(method, "success", result=vm_ref) return task_mdo.obj def _create_folder(self, method, *args, **kwargs): return create_folder() def _reconfig_vm(self, method, *args, **kwargs): """Reconfigures a VM and sets the properties supplied.""" vm_ref = args[0] vm_mdo = _get_vm_mdo(vm_ref) vm_mdo.reconfig(self.client.factory, kwargs.get("spec")) task_mdo = create_task(method, "success") return task_mdo.obj def _rename(self, method, *args, **kwargs): vm_ref = args[0] vm_mdo = _get_vm_mdo(vm_ref) vm_mdo.set('name', kwargs['newName']) task_mdo = create_task(method, "success") return task_mdo.obj def _create_copy_disk(self, method, vmdk_file_path): """Creates/copies a vmdk file object in the datastore.""" # We need to add/create both .vmdk and .-flat.vmdk files flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk") _add_file(vmdk_file_path) _add_file(flat_vmdk_file_path) task_mdo = create_task(method, "success") return task_mdo.obj def _extend_disk(self, method, size): """Extend disk size when create an instance.""" task_mdo = create_task(method, "success") return task_mdo.obj def _snapshot_vm(self, method): """Snapshots a VM. Here we do nothing for faking sake.""" task_mdo = create_task(method, "success") return task_mdo.obj def _find_all_by_uuid(self, *args, **kwargs): uuid = kwargs.get('uuid') vm_refs = [] for vm_ref in _db_content.get("VirtualMachine"): vm = _get_object(vm_ref) vm_uuid = vm.get("summary.config.instanceUuid") if vm_uuid == uuid: vm_refs.append(vm_ref) return vm_refs def _delete_snapshot(self, method, *args, **kwargs): """Deletes a VM snapshot. 
Here we do nothing for faking sake.""" task_mdo = create_task(method, "success") return task_mdo.obj def _delete_file(self, method, *args, **kwargs): """Deletes a file from the datastore.""" _remove_file(kwargs.get("name")) task_mdo = create_task(method, "success") return task_mdo.obj def _just_return(self): """Fakes a return.""" return def _just_return_task(self, method): """Fakes a task return.""" task_mdo = create_task(method, "success") return task_mdo.obj def _clone_vm(self, method, *args, **kwargs): """Fakes a VM clone.""" """Creates and registers a VM object with the Host System.""" source_vmref = args[0] source_vm_mdo = _get_vm_mdo(source_vmref) clone_spec = kwargs.get("spec") vm_dict = { "name": kwargs.get("name"), "ds": source_vm_mdo.get("datastore"), "runtime_host": source_vm_mdo.get("runtime.host"), "powerstate": source_vm_mdo.get("runtime.powerState"), "vmPathName": source_vm_mdo.get("config.files.vmPathName"), "numCpu": source_vm_mdo.get("summary.config.numCpu"), "mem": source_vm_mdo.get("summary.config.memorySizeMB"), "extra_config": source_vm_mdo.get("config.extraConfig").OptionValue, "virtual_device": source_vm_mdo.get("config.hardware.device").VirtualDevice, "instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")} if hasattr(clone_spec, 'config'): # Impose the config changes specified in the config property if (hasattr(clone_spec.config, 'instanceUuid') and clone_spec.config.instanceUuid is not None): vm_dict["instanceUuid"] = clone_spec.config.instanceUuid if hasattr(clone_spec.config, 'extraConfig'): extraConfigs = _merge_extraconfig(vm_dict["extra_config"], clone_spec.config.extraConfig) vm_dict["extra_config"] = extraConfigs virtual_machine = VirtualMachine(**vm_dict) _create_object("VirtualMachine", virtual_machine) task_mdo = create_task(method, "success") return task_mdo.obj def _unregister_vm(self, method, *args, **kwargs): """Unregisters a VM from the Host System.""" vm_ref = args[0] _get_vm_mdo(vm_ref) del _db_content["VirtualMachine"][vm_ref] task_mdo = create_task(method, "success") return task_mdo.obj def _search_ds(self, method, *args, **kwargs): """Searches the datastore for a file.""" # TODO(garyk): add support for spec parameter ds_path = kwargs.get("datastorePath") matched_files = set() # Check if we are searching for a file or a directory directory = False dname = '%s/' % ds_path for file in _db_content.get("files"): if file == dname: directory = True break # A directory search implies that we must return all # subdirectories if directory: for file in _db_content.get("files"): if file.find(ds_path) != -1: if not file.endswith(ds_path): path = file.replace(dname, '', 1).split('/') if path: matched_files.add(path[0]) if not matched_files: matched_files.add('/') else: for file in _db_content.get("files"): if file.find(ds_path) != -1: matched_files.add(ds_path) if matched_files: result = DataObject() result.path = ds_path result.file = [] for file in matched_files: matched = DataObject() matched.path = file matched.fileSize = 1024 result.file.append(matched) task_mdo = create_task(method, "success", result=result) else: task_mdo = create_task(method, "error", error_fault=FileNotFound()) return task_mdo.obj def _move_file(self, method, *args, **kwargs): source = kwargs.get('sourceName') destination = kwargs.get('destinationName') new_files = [] if source != destination: for file in _db_content.get("files"): if source in file: new_file = file.replace(source, destination) new_files.append(new_file) # if source is not a file then the children will also 
# be deleted _remove_file(source) for file in new_files: _add_file(file) task_mdo = create_task(method, "success") return task_mdo.obj def _make_dir(self, method, *args, **kwargs): """Creates a directory in the datastore.""" ds_path = kwargs.get("name") if get_file(ds_path): raise vexc.FileAlreadyExistsException() _db_content["files"].append('%s/' % ds_path) def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"): """Sets power state for the VM.""" if _db_content.get("VirtualMachine", None) is None: raise exception.NotFound("No Virtual Machine has been " "registered yet") if vm_ref not in _db_content.get("VirtualMachine"): raise exception.NotFound("Virtual Machine with ref %s is not " "there" % vm_ref) vm_mdo = _db_content.get("VirtualMachine").get(vm_ref) vm_mdo.set("runtime.powerState", pwr_state) task_mdo = create_task(method, "success") return task_mdo.obj def _retrieve_properties_continue(self, method, *args, **kwargs): """Continues the retrieve.""" return FakeRetrieveResult() def _retrieve_properties_cancel(self, method, *args, **kwargs): """Cancels the retrieve.""" return None def _retrieve_properties(self, method, *args, **kwargs): """Retrieves properties based on the type.""" spec_set = kwargs.get("specSet")[0] spec_type = spec_set.propSet[0].type properties = spec_set.propSet[0].pathSet if not isinstance(properties, list): properties = properties.split() objs = spec_set.objectSet lst_ret_objs = FakeRetrieveResult() for obj in objs: try: obj_ref = obj.obj if obj_ref == "RootFolder": # This means that we are retrieving props for all managed # data objects of the specified 'type' in the entire # inventory. This gets invoked by vim_util.get_objects. mdo_refs = _db_content[spec_type] elif obj_ref.type != spec_type: # This means that we are retrieving props for the managed # data objects in the parent object's 'path' property. # This gets invoked by vim_util.get_inner_objects # eg. obj_ref = # type = 'DataStore' # path = 'datastore' # the above will retrieve all datastores in the given # cluster. parent_mdo = _db_content[obj_ref.type][obj_ref] path = obj.selectSet[0].path mdo_refs = parent_mdo.get(path).ManagedObjectReference else: # This means that we are retrieving props of the given # managed data object. This gets invoked by # vim_util.get_properties_for_a_collection_of_objects. 
mdo_refs = [obj_ref] for mdo_ref in mdo_refs: mdo = _db_content[spec_type][mdo_ref] prop_list = [] for prop_name in properties: prop = Prop(prop_name, mdo.get(prop_name)) prop_list.append(prop) obj_content = ObjectContent(mdo.obj, prop_list) lst_ret_objs.add_object(obj_content) except Exception: LOG.exception("_retrieve_properties error") continue return lst_ret_objs def _add_port_group(self, method, *args, **kwargs): """Adds a port group to the host system.""" _host_sk = list(_db_content["HostSystem"].keys())[0] host_mdo = _db_content["HostSystem"][_host_sk] host_mdo._add_port_group(kwargs.get("portgrp")) def _add_iscsi_send_tgt(self, method, *args, **kwargs): """Adds a iscsi send target to the hba.""" send_targets = kwargs.get('targets') host_storage_sys = _get_objects('HostStorageSystem').objects[0] iscsi_hba_array = host_storage_sys.get('storageDeviceInfo' '.hostBusAdapter') iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0] if hasattr(iscsi_hba, 'configuredSendTarget'): iscsi_hba.configuredSendTarget.extend(send_targets) else: iscsi_hba.configuredSendTarget = send_targets def __getattr__(self, attr_name): if attr_name != "Login": self._check_session() if attr_name == "Login": return lambda *args, **kwargs: self._login() elif attr_name == "SessionIsActive": return lambda *args, **kwargs: self._session_is_active( *args, **kwargs) elif attr_name == "TerminateSession": return lambda *args, **kwargs: self._terminate_session( *args, **kwargs) elif attr_name == "CreateVM_Task": return lambda *args, **kwargs: self._create_vm(attr_name, *args, **kwargs) elif attr_name == "CreateFolder": return lambda *args, **kwargs: self._create_folder(attr_name, *args, **kwargs) elif attr_name == "ReconfigVM_Task": return lambda *args, **kwargs: self._reconfig_vm(attr_name, *args, **kwargs) elif attr_name == "Rename_Task": return lambda *args, **kwargs: self._rename(attr_name, *args, **kwargs) elif attr_name == "CreateVirtualDisk_Task": return lambda *args, **kwargs: self._create_copy_disk(attr_name, kwargs.get("name")) elif attr_name == "DeleteDatastoreFile_Task": return lambda *args, **kwargs: self._delete_file(attr_name, *args, **kwargs) elif attr_name == "PowerOnVM_Task": return lambda *args, **kwargs: self._set_power_state(attr_name, args[0], "poweredOn") elif attr_name == "PowerOffVM_Task": return lambda *args, **kwargs: self._set_power_state(attr_name, args[0], "poweredOff") elif attr_name == "RebootGuest": return lambda *args, **kwargs: self._just_return() elif attr_name == "ResetVM_Task": return lambda *args, **kwargs: self._set_power_state(attr_name, args[0], "poweredOn") elif attr_name == "SuspendVM_Task": return lambda *args, **kwargs: self._set_power_state(attr_name, args[0], "suspended") elif attr_name == "CreateSnapshot_Task": return lambda *args, **kwargs: self._snapshot_vm(attr_name) elif attr_name == "RemoveSnapshot_Task": return lambda *args, **kwargs: self._delete_snapshot(attr_name, *args, **kwargs) elif attr_name == "CopyVirtualDisk_Task": return lambda *args, **kwargs: self._create_copy_disk(attr_name, kwargs.get("destName")) elif attr_name == "ExtendVirtualDisk_Task": return lambda *args, **kwargs: self._extend_disk(attr_name, kwargs.get("size")) elif attr_name == "Destroy_Task": return lambda *args, **kwargs: self._unregister_vm(attr_name, *args, **kwargs) elif attr_name == "UnregisterVM": return lambda *args, **kwargs: self._unregister_vm(attr_name, *args, **kwargs) elif attr_name == "CloneVM_Task": return lambda *args, **kwargs: self._clone_vm(attr_name, *args, **kwargs) elif 
attr_name == "FindAllByUuid": return lambda *args, **kwargs: self._find_all_by_uuid(attr_name, *args, **kwargs) elif attr_name == "SearchDatastore_Task": return lambda *args, **kwargs: self._search_ds(attr_name, *args, **kwargs) elif attr_name == "MoveDatastoreFile_Task": return lambda *args, **kwargs: self._move_file(attr_name, *args, **kwargs) elif attr_name == "MakeDirectory": return lambda *args, **kwargs: self._make_dir(attr_name, *args, **kwargs) elif attr_name == "RetrievePropertiesEx": return lambda *args, **kwargs: self._retrieve_properties( attr_name, *args, **kwargs) elif attr_name == "ContinueRetrievePropertiesEx": return lambda *args, **kwargs: self._retrieve_properties_continue( attr_name, *args, **kwargs) elif attr_name == "CancelRetrievePropertiesEx": return lambda *args, **kwargs: self._retrieve_properties_cancel( attr_name, *args, **kwargs) elif attr_name == "AddPortGroup": return lambda *args, **kwargs: self._add_port_group(attr_name, *args, **kwargs) elif attr_name in ("RebootHost_Task", "ShutdownHost_Task", "PowerUpHostFromStandBy_Task", "EnterMaintenanceMode_Task", "ExitMaintenanceMode_Task", "RescanHba"): return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "AddInternetScsiSendTargets": return lambda *args, **kwargs: self._add_iscsi_send_tgt(attr_name, *args, **kwargs) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_imagecache.py0000664000567000056710000002745113064447152025445 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import mock from oslo_config import cfg from oslo_utils import fixture as utils_fixture from oslo_vmware.objects import datastore as ds_obj from oslo_vmware import vim_util as vutil from nova import objects from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.virt.vmwareapi import fake from nova.tests import uuidsentinel from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import imagecache CONF = cfg.CONF class ImageCacheManagerTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True def setUp(self): super(ImageCacheManagerTestCase, self).setUp() self._session = mock.Mock(name='session') self._imagecache = imagecache.ImageCacheManager(self._session, 'fake-base-folder') self._time = datetime.datetime(2012, 11, 22, 12, 00, 00) self._file_name = 'ts-2012-11-22-12-00-00' fake.reset() def tearDown(self): super(ImageCacheManagerTestCase, self).tearDown() fake.reset() def test_timestamp_cleanup(self): def fake_get_timestamp(ds_browser, ds_path): self.assertEqual('fake-ds-browser', ds_browser) self.assertEqual('[fake-ds] fake-path', str(ds_path)) if not self.exists: return ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX, self._time.strftime(imagecache.TIMESTAMP_FORMAT)) return ts with test.nested( mock.patch.object(self._imagecache, '_get_timestamp', fake_get_timestamp), mock.patch.object(ds_util, 'file_delete') ) as (_get_timestamp, _file_delete): self.exists = False self._imagecache.timestamp_cleanup( 'fake-dc-ref', 'fake-ds-browser', ds_obj.DatastorePath('fake-ds', 'fake-path')) self.assertEqual(0, _file_delete.call_count) self.exists = True self._imagecache.timestamp_cleanup( 'fake-dc-ref', 'fake-ds-browser', ds_obj.DatastorePath('fake-ds', 'fake-path')) expected_ds_path = ds_obj.DatastorePath( 'fake-ds', 'fake-path', self._file_name) _file_delete.assert_called_once_with(self._session, expected_ds_path, 'fake-dc-ref') def test_get_timestamp(self): def fake_get_sub_folders(session, ds_browser, ds_path): self.assertEqual('fake-ds-browser', ds_browser) self.assertEqual('[fake-ds] fake-path', str(ds_path)) if self.exists: files = set() files.add(self._file_name) return files with mock.patch.object(ds_util, 'get_sub_folders', fake_get_sub_folders): self.exists = True ts = self._imagecache._get_timestamp( 'fake-ds-browser', ds_obj.DatastorePath('fake-ds', 'fake-path')) self.assertEqual(self._file_name, ts) self.exists = False ts = self._imagecache._get_timestamp( 'fake-ds-browser', ds_obj.DatastorePath('fake-ds', 'fake-path')) self.assertIsNone(ts) def test_get_timestamp_filename(self): self.useFixture(utils_fixture.TimeFixture(self._time)) fn = self._imagecache._get_timestamp_filename() self.assertEqual(self._file_name, fn) def test_get_datetime_from_filename(self): t = self._imagecache._get_datetime_from_filename(self._file_name) self.assertEqual(self._time, t) def test_get_ds_browser(self): cache = self._imagecache._ds_browser ds_browser = mock.Mock() moref = fake.ManagedObjectReference('datastore-100') self.assertIsNone(cache.get(moref.value)) mock_get_method = mock.Mock(return_value=ds_browser) with mock.patch.object(vutil, 'get_object_property', mock_get_method): ret = self._imagecache._get_ds_browser(moref) mock_get_method.assert_called_once_with(mock.ANY, moref, 'browser') self.assertIs(ds_browser, ret) self.assertIs(ds_browser, cache.get(moref.value)) def test_list_base_images(self): def fake_get_object_property(vim, mobj, property_name): return 'fake-ds-browser' def fake_get_sub_folders(session, ds_browser, ds_path): files = set() 
files.add('image-ref-uuid') return files with test.nested( mock.patch.object(vutil, 'get_object_property', fake_get_object_property), mock.patch.object(ds_util, 'get_sub_folders', fake_get_sub_folders) ) as (_get_dynamic, _get_sub_folders): fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref') datastore = ds_obj.Datastore(name='ds', ref=fake_ds_ref) ds_path = datastore.build_path('base_folder') images = self._imagecache._list_datastore_images( ds_path, datastore) originals = set() originals.add('image-ref-uuid') self.assertEqual({'originals': originals, 'unexplained_images': []}, images) @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_folder_get') @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_cleanup') @mock.patch.object(imagecache.ImageCacheManager, '_get_ds_browser') def test_enlist_image(self, mock_get_ds_browser, mock_timestamp_cleanup, mock_timestamp_folder_get): image_id = "fake_image_id" dc_ref = "fake_dc_ref" fake_ds_ref = mock.Mock() ds = ds_obj.Datastore( ref=fake_ds_ref, name='fake_ds', capacity=1, freespace=1) ds_browser = mock.Mock() mock_get_ds_browser.return_value = ds_browser timestamp_folder_path = mock.Mock() mock_timestamp_folder_get.return_value = timestamp_folder_path self._imagecache.enlist_image(image_id, ds, dc_ref) cache_root_folder = ds.build_path("fake-base-folder") mock_get_ds_browser.assert_called_once_with( ds.ref) mock_timestamp_folder_get.assert_called_once_with( cache_root_folder, "fake_image_id") mock_timestamp_cleanup.assert_called_once_with( dc_ref, ds_browser, timestamp_folder_path) def test_age_cached_images(self): def fake_get_ds_browser(ds_ref): return 'fake-ds-browser' def fake_get_timestamp(ds_browser, ds_path): self._get_timestamp_called += 1 path = str(ds_path) if path == '[fake-ds] fake-path/fake-image-1': # No time stamp exists return if path == '[fake-ds] fake-path/fake-image-2': # Timestamp that will be valid => no deletion return 'ts-2012-11-22-10-00-00' if path == '[fake-ds] fake-path/fake-image-3': # Timestamp that will be invalid => deletion return 'ts-2012-11-20-12-00-00' self.fail() def fake_mkdir(session, ts_path, dc_ref): self.assertEqual( '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00', str(ts_path)) def fake_file_delete(session, ds_path, dc_ref): self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path)) def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path): self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path)) with test.nested( mock.patch.object(self._imagecache, '_get_ds_browser', fake_get_ds_browser), mock.patch.object(self._imagecache, '_get_timestamp', fake_get_timestamp), mock.patch.object(ds_util, 'mkdir', fake_mkdir), mock.patch.object(ds_util, 'file_delete', fake_file_delete), mock.patch.object(self._imagecache, 'timestamp_cleanup', fake_timestamp_cleanup), ) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete, _timestamp_cleanup): self.useFixture(utils_fixture.TimeFixture(self._time)) datastore = ds_obj.Datastore(name='ds', ref='fake-ds-ref') dc_info = ds_util.DcInfo(ref='dc_ref', name='name', vmFolder='vmFolder') self._get_timestamp_called = 0 self._imagecache.originals = set(['fake-image-1', 'fake-image-2', 'fake-image-3', 'fake-image-4']) self._imagecache.used_images = set(['fake-image-4']) self._imagecache._age_cached_images( 'fake-context', datastore, dc_info, ds_obj.DatastorePath('fake-ds', 'fake-path')) self.assertEqual(3, self._get_timestamp_called) @mock.patch.object(objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') def 
test_update(self, mock_get_by_inst): def fake_list_datastore_images(ds_path, datastore): return {'unexplained_images': [], 'originals': self.images} def fake_age_cached_images(context, datastore, dc_info, ds_path): self.assertEqual('[ds] fake-base-folder', str(ds_path)) self.assertEqual(self.images, self._imagecache.used_images) self.assertEqual(self.images, self._imagecache.originals) with test.nested( mock.patch.object(self._imagecache, '_list_datastore_images', fake_list_datastore_images), mock.patch.object(self._imagecache, '_age_cached_images', fake_age_cached_images) ) as (_list_base, _age_and_verify): instances = [{'image_ref': '1', 'host': CONF.host, 'name': 'inst-1', 'uuid': uuidsentinel.foo, 'vm_state': '', 'task_state': ''}, {'image_ref': '2', 'host': CONF.host, 'name': 'inst-2', 'uuid': uuidsentinel.bar, 'vm_state': '', 'task_state': ''}] all_instances = [fake_instance.fake_instance_obj(None, **instance) for instance in instances] self.images = set(['1', '2']) datastore = ds_obj.Datastore(name='ds', ref='fake-ds-ref') dc_info = ds_util.DcInfo(ref='dc_ref', name='name', vmFolder='vmFolder') datastores_info = [(datastore, dc_info)] self._imagecache.update('context', all_instances, datastores_info) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py0000664000567000056710000001630713064447141031151 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
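# The tests below drive ds_util._select_datastore with hand-built
# RetrieveResult/ObjectContent namedtuples. Each row of test data maps
# positionally onto propset_name_list, e.g. the row
#     ['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789]
# becomes summary.type='VMFS', summary.name='os-some-name',
# summary.accessible=True, summary.maintenanceMode='normal',
# summary.capacity=987654321 and summary.freeSpace=12346789 on a fake
# datastore object whose moref is 'ds-000', 'ds-001', ... by list position.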
import collections import re from oslo_utils import units from oslo_vmware.objects import datastore as ds_obj from nova import test from nova.virt.vmwareapi import ds_util ResultSet = collections.namedtuple('ResultSet', ['objects']) ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet']) DynamicProperty = collections.namedtuple('Property', ['name', 'val']) MoRef = collections.namedtuple('ManagedObjectReference', ['value']) class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase): def setUp(self): super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp() self.data = [ ['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789], ['NFS', 'another-name', True, 'normal', 9876543210, 123467890], ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900], ['VMFS', 'some-name-good', False, 'normal', 987654321, 12346789], ['VMFS', 'new-name', True, 'inMaintenance', 987654321, 12346789] ] def build_result_set(self, mock_data, name_list=None): # datastores will have a moref_id of ds-000 and # so on based on their index in the mock_data list if name_list is None: name_list = self.propset_name_list objects = [] for id, row in enumerate(mock_data): obj = ObjectContent( obj=MoRef(value="ds-%03d" % id), propSet=[]) for index, value in enumerate(row): obj.propSet.append( DynamicProperty(name=name_list[index], val=row[index])) objects.append(obj) return ResultSet(objects=objects) @property def propset_name_list(self): return ['summary.type', 'summary.name', 'summary.accessible', 'summary.maintenanceMode', 'summary.capacity', 'summary.freeSpace'] def test_filter_datastores_simple(self): datastores = self.build_result_set(self.data) best_match = ds_obj.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) rec = ds_util._select_datastore(None, datastores, best_match) self.assertIsNotNone(rec.ref, "could not find datastore!") self.assertEqual('ds-001', rec.ref.value, "didn't find the right datastore!") self.assertEqual(123467890, rec.freespace, "did not obtain correct freespace!") def test_filter_datastores_empty(self): data = [] datastores = self.build_result_set(data) best_match = ds_obj.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) rec = ds_util._select_datastore(None, datastores, best_match) self.assertEqual(best_match, rec) def test_filter_datastores_no_match(self): datastores = self.build_result_set(self.data) datastore_regex = re.compile('no_match.*') best_match = ds_obj.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) rec = ds_util._select_datastore(None, datastores, best_match, datastore_regex) self.assertEqual(best_match, rec, "did not match datastore properly") def test_filter_datastores_specific_match(self): data = [ ['VMFS', 'os-some-name', True, 'normal', 987654321, 1234678], ['NFS', 'another-name', True, 'normal', 9876543210, 123467890], ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900], ['VMFS', 'some-name-good', True, 'normal', 987654321, 12346789], ['VMFS', 'some-other-good', False, 'normal', 987654321000, 12346789000], ['VMFS', 'new-name', True, 'inMaintenance', 987654321000, 12346789000] ] # only the DS some-name-good is accessible and matches the regex datastores = self.build_result_set(data) datastore_regex = re.compile('.*-good$') best_match = ds_obj.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) rec = ds_util._select_datastore(None, datastores, best_match, datastore_regex) self.assertIsNotNone(rec, "could not find datastore!") self.assertEqual('ds-003', rec.ref.value, "didn't find 
the right datastore!") self.assertNotEqual('ds-004', rec.ref.value, "accepted an unreachable datastore!") self.assertEqual('some-name-good', rec.name) self.assertEqual(12346789, rec.freespace, "did not obtain correct freespace!") self.assertEqual(987654321, rec.capacity, "did not obtain correct capacity!") def test_filter_datastores_missing_props(self): data = [ ['VMFS', 'os-some-name', 987654321, 1234678], ['NFS', 'another-name', 9876543210, 123467890], ] # no matches are expected when 'summary.accessible' is missing prop_names = ['summary.type', 'summary.name', 'summary.capacity', 'summary.freeSpace'] datastores = self.build_result_set(data, prop_names) best_match = ds_obj.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) rec = ds_util._select_datastore(None, datastores, best_match) self.assertEqual(best_match, rec, "no matches were expected") def test_filter_datastores_best_match(self): data = [ ['VMFS', 'spam-good', True, 20 * units.Gi, 10 * units.Gi], ['NFS', 'eggs-good', True, 40 * units.Gi, 15 * units.Gi], ['NFS41', 'nfs41-is-good', True, 35 * units.Gi, 12 * units.Gi], ['BAD', 'some-name-bad', True, 30 * units.Gi, 20 * units.Gi], ['VMFS', 'some-name-good', True, 50 * units.Gi, 5 * units.Gi], ['VMFS', 'some-other-good', True, 10 * units.Gi, 10 * units.Gi], ] datastores = self.build_result_set(data) datastore_regex = re.compile('.*-good$') # the current best match is better than all candidates best_match = ds_obj.Datastore(ref='ds-100', name='best-ds-good', capacity=20 * units.Gi, freespace=19 * units.Gi) rec = ds_util._select_datastore(None, datastores, best_match, datastore_regex) self.assertEqual(best_match, rec, "did not match datastore properly") nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_volumeops.py0000664000567000056710000010042113064447141025413 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_vmware import exceptions as oslo_vmw_exceptions from oslo_vmware import vim_util as vutil from nova.compute import vm_states from nova import context from nova import exception from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.image import fake as image_fake from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake from nova.tests.unit.virt.vmwareapi import stubs from nova.tests import uuidsentinel from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import volumeops class VMwareVolumeOpsTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVolumeOpsTestCase, self).setUp() vmwareapi_fake.reset() stubs.set_stubs(self) self._session = driver.VMwareAPISession() self._context = context.RequestContext('fake_user', 'fake_project') self._volumeops = volumeops.VMwareVolumeOps(self._session) self._image_id = image_fake.get_valid_image_id() self._instance_values = { 'name': 'fake_name', 'uuid': uuidsentinel.foo, 'vcpus': 1, 'memory_mb': 512, 'image_ref': self._image_id, 'root_gb': 10, 'node': 'respool-1001(MyResPoolName)', 'expected_attrs': ['system_metadata'], } self._instance = fake_instance.fake_instance_obj(self._context, **self._instance_values) def _test_detach_disk_from_vm(self, destroy_disk=False): def fake_call_method(module, method, *args, **kwargs): vmdk_detach_config_spec = kwargs.get('spec') virtual_device_config = vmdk_detach_config_spec.deviceChange[0] self.assertEqual('remove', virtual_device_config.operation) self.assertEqual('ns0:VirtualDeviceConfigSpec', virtual_device_config.obj_name) if destroy_disk: self.assertEqual('destroy', virtual_device_config.fileOperation) else: self.assertFalse(hasattr(virtual_device_config, 'fileOperation')) return 'fake_configure_task' with test.nested( mock.patch.object(self._session, '_wait_for_task'), mock.patch.object(self._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): fake_device = vmwareapi_fake.DataObject() fake_device.backing = vmwareapi_fake.DataObject() fake_device.backing.fileName = 'fake_path' fake_device.key = 'fake_key' self._volumeops.detach_disk_from_vm('fake_vm_ref', self._instance, fake_device, destroy_disk) _wait_for_task.assert_has_calls([ mock.call('fake_configure_task')]) def test_detach_with_destroy_disk_from_vm(self): self._test_detach_disk_from_vm(destroy_disk=True) def test_detach_without_destroy_disk_from_vm(self): self._test_detach_disk_from_vm(destroy_disk=False) def _fake_call_get_object_property(self, uuid, result): def fake_call_method(vim, method, vm_ref, prop): expected_prop = 'config.extraConfig["volume-%s"]' % uuid self.assertEqual('VirtualMachine', vm_ref._type) self.assertEqual(expected_prop, prop) return result return fake_call_method def test_get_volume_uuid(self): vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine', 'vm-134') uuid = '1234' opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val') fake_call = self._fake_call_get_object_property(uuid, opt_val) with mock.patch.object(self._session, "_call_method", fake_call): val = self._volumeops._get_volume_uuid(vm_ref, uuid) self.assertEqual('volume-val', val) def test_get_volume_uuid_not_found(self): vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine', 'vm-134') uuid = '1234' fake_call = self._fake_call_get_object_property(uuid, None) with mock.patch.object(self._session, "_call_method", fake_call): val = 
self._volumeops._get_volume_uuid(vm_ref, uuid) self.assertIsNone(val) def test_attach_volume_vmdk_invalid(self): connection_info = {'driver_volume_type': 'vmdk', 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE) vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE, constants.DISK_TYPE_PREALLOCATED, 1024, 'fake-device') with test.nested( mock.patch.object(vm_util, 'get_vm_ref'), mock.patch.object(self._volumeops, '_get_volume_ref'), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk_info), mock.patch.object(vm_util, 'get_vm_state', return_value='PoweredOn') ) as (get_vm_ref, get_volume_ref, get_vmdk_info, get_vm_state): self.assertRaises(exception.Invalid, self._volumeops._attach_volume_vmdk, connection_info, instance) get_vm_ref.assert_called_once_with(self._volumeops._session, instance) get_volume_ref.assert_called_once_with( connection_info['data']['volume']) self.assertTrue(get_vmdk_info.called) get_vm_state.assert_called_once_with(self._volumeops._session, instance) @mock.patch.object(vm_util, 'get_vm_extra_config_spec', return_value=mock.sentinel.extra_config) @mock.patch.object(vm_util, 'reconfigure_vm') def test_update_volume_details(self, reconfigure_vm, get_vm_extra_config_spec): volume_uuid = '26f5948e-52a3-4ee6-8d48-0a379afd0828' device_uuid = '0d86246a-2adb-470d-a9f7-bce09930c5d' self._volumeops._update_volume_details( mock.sentinel.vm_ref, volume_uuid, device_uuid) get_vm_extra_config_spec.assert_called_once_with( self._volumeops._session.vim.client.factory, {'volume-%s' % volume_uuid: device_uuid}) reconfigure_vm.assert_called_once_with(self._volumeops._session, mock.sentinel.vm_ref, mock.sentinel.extra_config) def _fake_connection_info(self): return {'driver_volume_type': 'vmdk', 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} @mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_uuid') @mock.patch.object(vm_util, 'get_vmdk_backed_disk_device') def test_get_vmdk_backed_disk_device(self, get_vmdk_backed_disk_device, get_volume_uuid): session = mock.Mock() self._volumeops._session = session hardware_devices = mock.sentinel.hardware_devices session._call_method.return_value = hardware_devices disk_uuid = mock.sentinel.disk_uuid get_volume_uuid.return_value = disk_uuid device = mock.sentinel.device get_vmdk_backed_disk_device.return_value = device vm_ref = mock.sentinel.vm_ref connection_info = self._fake_connection_info() ret = self._volumeops._get_vmdk_backed_disk_device( vm_ref, connection_info['data']) self.assertEqual(device, ret) session._call_method.assert_called_once_with( vutil, "get_object_property", vm_ref, "config.hardware.device") get_volume_uuid.assert_called_once_with( vm_ref, connection_info['data']['volume_id']) get_vmdk_backed_disk_device.assert_called_once_with(hardware_devices, disk_uuid) @mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_uuid') @mock.patch.object(vm_util, 'get_vmdk_backed_disk_device') def test_get_vmdk_backed_disk_device_with_missing_disk_device( self, get_vmdk_backed_disk_device, get_volume_uuid): session = mock.Mock() self._volumeops._session = session hardware_devices = mock.sentinel.hardware_devices session._call_method.return_value = hardware_devices disk_uuid = mock.sentinel.disk_uuid get_volume_uuid.return_value = disk_uuid get_vmdk_backed_disk_device.return_value = None vm_ref = mock.sentinel.vm_ref connection_info = self._fake_connection_info() 
self.assertRaises(exception.DiskNotFound, self._volumeops._get_vmdk_backed_disk_device, vm_ref, connection_info['data']) session._call_method.assert_called_once_with( vutil, "get_object_property", vm_ref, "config.hardware.device") get_volume_uuid.assert_called_once_with( vm_ref, connection_info['data']['volume_id']) get_vmdk_backed_disk_device.assert_called_once_with(hardware_devices, disk_uuid) def test_detach_volume_vmdk(self): vmdk_info = vm_util.VmdkInfo('fake-path', 'lsiLogic', 'thin', 1024, 'fake-device') with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=mock.sentinel.vm_ref), mock.patch.object(self._volumeops, '_get_volume_ref', return_value=mock.sentinel.volume_ref), mock.patch.object(self._volumeops, '_get_vmdk_backed_disk_device', return_value=mock.sentinel.device), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk_info), mock.patch.object(self._volumeops, '_consolidate_vmdk_volume'), mock.patch.object(self._volumeops, 'detach_disk_from_vm'), mock.patch.object(self._volumeops, '_update_volume_details'), ) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device, get_vmdk_info, consolidate_vmdk_volume, detach_disk_from_vm, update_volume_details): connection_info = {'driver_volume_type': 'vmdk', 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e' }} instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE) self._volumeops._detach_volume_vmdk(connection_info, instance) get_vm_ref.assert_called_once_with(self._volumeops._session, instance) get_volume_ref.assert_called_once_with( connection_info['data']['volume']) get_vmdk_backed_disk_device.assert_called_once_with( mock.sentinel.vm_ref, connection_info['data']) get_vmdk_info.assert_called_once_with(self._volumeops._session, mock.sentinel.volume_ref) consolidate_vmdk_volume.assert_called_once_with( instance, mock.sentinel.vm_ref, mock.sentinel.device, mock.sentinel.volume_ref, adapter_type=vmdk_info.adapter_type, disk_type=vmdk_info.disk_type) detach_disk_from_vm.assert_called_once_with(mock.sentinel.vm_ref, instance, mock.sentinel.device) update_volume_details.assert_called_once_with( mock.sentinel.vm_ref, connection_info['data']['volume_id'], "") def test_detach_volume_vmdk_invalid(self): connection_info = {'driver_volume_type': 'vmdk', 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE) vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE, constants.DISK_TYPE_PREALLOCATED, 1024, 'fake-device') with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=mock.sentinel.vm_ref), mock.patch.object(self._volumeops, '_get_volume_ref'), mock.patch.object(self._volumeops, '_get_vmdk_backed_disk_device'), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk_info), mock.patch.object(vm_util, 'get_vm_state', return_value='PoweredOn') ) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device, get_vmdk_info, get_vm_state): self.assertRaises(exception.Invalid, self._volumeops._detach_volume_vmdk, connection_info, instance) get_vm_ref.assert_called_once_with(self._volumeops._session, instance) get_volume_ref.assert_called_once_with( connection_info['data']['volume']) get_vmdk_backed_disk_device.assert_called_once_with( mock.sentinel.vm_ref, connection_info['data']) self.assertTrue(get_vmdk_info.called) get_vm_state.assert_called_once_with(self._volumeops._session, instance) 
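# NOTE: a minimal illustrative sketch (not part of the original test module) of the fake VMDK
# connection_info dict that the attach/detach volume tests in this class assume; the values
# mirror _fake_connection_info() above and are test doubles rather than real Cinder connection data:
#
#     connection_info = {'driver_volume_type': 'vmdk',
#                        'serial': 'volume-fake-id',
#                        'data': {'volume': 'vm-10',
#                                 'volume_id': 'volume-fake-id'}}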
@mock.patch.object(vm_util, 'get_vm_ref') @mock.patch.object(vm_util, 'get_rdm_disk') @mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target') @mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') def test_detach_volume_iscsi(self, detach_disk_from_vm, iscsi_get_target, get_rdm_disk, get_vm_ref): vm_ref = mock.sentinel.vm_ref get_vm_ref.return_value = vm_ref device_name = mock.sentinel.device_name disk_uuid = mock.sentinel.disk_uuid iscsi_get_target.return_value = (device_name, disk_uuid) session = mock.Mock() self._volumeops._session = session hardware_devices = mock.sentinel.hardware_devices session._call_method.return_value = hardware_devices device = mock.sentinel.device get_rdm_disk.return_value = device connection_info = self._fake_connection_info() instance = mock.sentinel.instance self._volumeops._detach_volume_iscsi(connection_info, instance) get_vm_ref.assert_called_once_with(session, instance) iscsi_get_target.assert_called_once_with(connection_info['data']) session._call_method.assert_called_once_with( vutil, "get_object_property", vm_ref, "config.hardware.device") get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid) detach_disk_from_vm.assert_called_once_with( vm_ref, instance, device, destroy_disk=True) @mock.patch.object(vm_util, 'get_vm_ref') @mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target') def test_detach_volume_iscsi_with_missing_iscsi_target( self, iscsi_get_target, get_vm_ref): vm_ref = mock.sentinel.vm_ref get_vm_ref.return_value = vm_ref iscsi_get_target.return_value = (None, None) connection_info = self._fake_connection_info() instance = mock.sentinel.instance self.assertRaises( exception.StorageError, self._volumeops._detach_volume_iscsi, connection_info, instance) get_vm_ref.assert_called_once_with(self._volumeops._session, instance) iscsi_get_target.assert_called_once_with(connection_info['data']) @mock.patch.object(vm_util, 'get_vm_ref') @mock.patch.object(vm_util, 'get_rdm_disk') @mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target') @mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') def test_detach_volume_iscsi_with_missing_disk_device( self, detach_disk_from_vm, iscsi_get_target, get_rdm_disk, get_vm_ref): vm_ref = mock.sentinel.vm_ref get_vm_ref.return_value = vm_ref device_name = mock.sentinel.device_name disk_uuid = mock.sentinel.disk_uuid iscsi_get_target.return_value = (device_name, disk_uuid) session = mock.Mock() self._volumeops._session = session hardware_devices = mock.sentinel.hardware_devices session._call_method.return_value = hardware_devices get_rdm_disk.return_value = None connection_info = self._fake_connection_info() instance = mock.sentinel.instance self.assertRaises( exception.DiskNotFound, self._volumeops._detach_volume_iscsi, connection_info, instance) get_vm_ref.assert_called_once_with(session, instance) iscsi_get_target.assert_called_once_with(connection_info['data']) session._call_method.assert_called_once_with( vutil, "get_object_property", vm_ref, "config.hardware.device") get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid) self.assertFalse(detach_disk_from_vm.called) def _test_attach_volume_vmdk(self, adapter_type=None): connection_info = {'driver_volume_type': constants.DISK_FORMAT_VMDK, 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} vm_ref = 'fake-vm-ref' volume_device = mock.MagicMock() volume_device.backing.fileName = 'fake-path' default_adapter_type = constants.DEFAULT_ADAPTER_TYPE 
disk_type = constants.DEFAULT_DISK_TYPE disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811' backing = mock.Mock(uuid=disk_uuid) device = mock.Mock(backing=backing) vmdk_info = vm_util.VmdkInfo('fake-path', default_adapter_type, disk_type, 1024, device) adapter_type = adapter_type or default_adapter_type if adapter_type == constants.ADAPTER_TYPE_IDE: vm_state = 'PoweredOff' else: vm_state = 'PoweredOn' with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref), mock.patch.object(self._volumeops, '_get_volume_ref'), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk_info), mock.patch.object(self._volumeops, 'attach_disk_to_vm'), mock.patch.object(self._volumeops, '_update_volume_details'), mock.patch.object(vm_util, 'get_vm_state', return_value=vm_state) ) as (get_vm_ref, get_volume_ref, get_vmdk_info, attach_disk_to_vm, update_volume_details, get_vm_state): self._volumeops.attach_volume(connection_info, self._instance, adapter_type) get_vm_ref.assert_called_once_with(self._volumeops._session, self._instance) get_volume_ref.assert_called_once_with( connection_info['data']['volume']) self.assertTrue(get_vmdk_info.called) attach_disk_to_vm.assert_called_once_with( vm_ref, self._instance, adapter_type, constants.DISK_TYPE_PREALLOCATED, vmdk_path='fake-path') update_volume_details.assert_called_once_with( vm_ref, connection_info['data']['volume_id'], disk_uuid) if adapter_type == constants.ADAPTER_TYPE_IDE: get_vm_state.assert_called_once_with(self._volumeops._session, self._instance) else: self.assertFalse(get_vm_state.called) def _test_attach_volume_iscsi(self, adapter_type=None): connection_info = {'driver_volume_type': constants.DISK_FORMAT_ISCSI, 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} vm_ref = 'fake-vm-ref' default_adapter_type = constants.DEFAULT_ADAPTER_TYPE adapter_type = adapter_type or default_adapter_type with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref), mock.patch.object(self._volumeops, '_iscsi_discover_target', return_value=(mock.sentinel.device_name, mock.sentinel.uuid)), mock.patch.object(vm_util, 'get_scsi_adapter_type', return_value=adapter_type), mock.patch.object(self._volumeops, 'attach_disk_to_vm') ) as (get_vm_ref, iscsi_discover_target, get_scsi_adapter_type, attach_disk_to_vm): self._volumeops.attach_volume(connection_info, self._instance, adapter_type) get_vm_ref.assert_called_once_with(self._volumeops._session, self._instance) iscsi_discover_target.assert_called_once_with( connection_info['data']) if adapter_type is None: self.assertTrue(get_scsi_adapter_type.called) attach_disk_to_vm.assert_called_once_with(vm_ref, self._instance, adapter_type, 'rdmp', device_name=mock.sentinel.device_name) def test_attach_volume_vmdk(self): for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE, constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE, constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL): self._test_attach_volume_vmdk(adapter_type) def test_attach_volume_iscsi(self): for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE, constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL): self._test_attach_volume_iscsi(adapter_type) @mock.patch.object(volumeops.VMwareVolumeOps, '_get_vmdk_base_volume_device') @mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume') def test_consolidate_vmdk_volume_with_no_relocate( self, relocate_vmdk_volume, get_vmdk_base_volume_device): 
file_name = mock.sentinel.file_name backing = mock.Mock(fileName=file_name) original_device = mock.Mock(backing=backing) get_vmdk_base_volume_device.return_value = original_device device = mock.Mock(backing=backing) volume_ref = mock.sentinel.volume_ref vm_ref = mock.sentinel.vm_ref self._volumeops._consolidate_vmdk_volume(self._instance, vm_ref, device, volume_ref) get_vmdk_base_volume_device.assert_called_once_with(volume_ref) self.assertFalse(relocate_vmdk_volume.called) @mock.patch.object(volumeops.VMwareVolumeOps, '_get_vmdk_base_volume_device') @mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume') @mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm') @mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host') @mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') @mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm') def test_consolidate_vmdk_volume_with_relocate( self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host, get_host_of_vm, relocate_vmdk_volume, get_vmdk_base_volume_device): file_name = mock.sentinel.file_name backing = mock.Mock(fileName=file_name) original_device = mock.Mock(backing=backing) get_vmdk_base_volume_device.return_value = original_device new_file_name = mock.sentinel.new_file_name datastore = mock.sentinel.datastore new_backing = mock.Mock(fileName=new_file_name, datastore=datastore) device = mock.Mock(backing=new_backing) host = mock.sentinel.host get_host_of_vm.return_value = host rp = mock.sentinel.rp get_res_pool_of_host.return_value = rp instance = self._instance volume_ref = mock.sentinel.volume_ref vm_ref = mock.sentinel.vm_ref adapter_type = constants.ADAPTER_TYPE_BUSLOGIC disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device, volume_ref, adapter_type, disk_type) get_vmdk_base_volume_device.assert_called_once_with(volume_ref) relocate_vmdk_volume.assert_called_once_with( volume_ref, rp, datastore, host) detach_disk_from_vm.assert_called_once_with( volume_ref, instance, original_device, destroy_disk=True) attach_disk_to_vm.assert_called_once_with( volume_ref, instance, adapter_type, disk_type, vmdk_path=new_file_name) @mock.patch.object(volumeops.VMwareVolumeOps, '_get_vmdk_base_volume_device') @mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume') @mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm') @mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host') @mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') @mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm') def test_consolidate_vmdk_volume_with_missing_vmdk( self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host, get_host_of_vm, relocate_vmdk_volume, get_vmdk_base_volume_device): file_name = mock.sentinel.file_name backing = mock.Mock(fileName=file_name) original_device = mock.Mock(backing=backing) get_vmdk_base_volume_device.return_value = original_device new_file_name = mock.sentinel.new_file_name datastore = mock.sentinel.datastore new_backing = mock.Mock(fileName=new_file_name, datastore=datastore) device = mock.Mock(backing=new_backing) host = mock.sentinel.host get_host_of_vm.return_value = host rp = mock.sentinel.rp get_res_pool_of_host.return_value = rp relocate_vmdk_volume.side_effect = [ oslo_vmw_exceptions.FileNotFoundException, None] instance = mock.sentinel.instance volume_ref = mock.sentinel.volume_ref vm_ref = mock.sentinel.vm_ref adapter_type = 
constants.ADAPTER_TYPE_BUSLOGIC disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device, volume_ref, adapter_type, disk_type) get_vmdk_base_volume_device.assert_called_once_with(volume_ref) relocate_calls = [mock.call(volume_ref, rp, datastore, host), mock.call(volume_ref, rp, datastore, host)] self.assertEqual(relocate_calls, relocate_vmdk_volume.call_args_list) detach_disk_from_vm.assert_called_once_with( volume_ref, instance, original_device) attach_disk_to_vm.assert_called_once_with( volume_ref, instance, adapter_type, disk_type, vmdk_path=new_file_name) def test_iscsi_get_host_iqn(self): host_mor = mock.Mock() iqn = 'iscsi-name' hba = vmwareapi_fake.HostInternetScsiHba(iqn) hbas = mock.MagicMock(HostHostBusAdapter=[hba]) with test.nested( mock.patch.object(vm_util, 'get_host_ref_for_vm', return_value=host_mor), mock.patch.object(self._volumeops._session, '_call_method', return_value=hbas) ) as (fake_get_host_ref_for_vm, fake_call_method): result = self._volumeops._iscsi_get_host_iqn(self._instance) fake_get_host_ref_for_vm.assert_called_once_with( self._volumeops._session, self._instance) fake_call_method.assert_called_once_with(vutil, "get_object_property", host_mor, "config.storageDevice.hostBusAdapter") self.assertEqual(iqn, result) def test_iscsi_get_host_iqn_instance_not_found(self): host_mor = mock.Mock() iqn = 'iscsi-name' hba = vmwareapi_fake.HostInternetScsiHba(iqn) hbas = mock.MagicMock(HostHostBusAdapter=[hba]) with test.nested( mock.patch.object(vm_util, 'get_host_ref_for_vm', side_effect=exception.InstanceNotFound('fake')), mock.patch.object(vm_util, 'get_host_ref', return_value=host_mor), mock.patch.object(self._volumeops._session, '_call_method', return_value=hbas) ) as (fake_get_host_ref_for_vm, fake_get_host_ref, fake_call_method): result = self._volumeops._iscsi_get_host_iqn(self._instance) fake_get_host_ref_for_vm.assert_called_once_with( self._volumeops._session, self._instance) fake_get_host_ref.assert_called_once_with( self._volumeops._session, self._volumeops._cluster) fake_call_method.assert_called_once_with(vutil, "get_object_property", host_mor, "config.storageDevice.hostBusAdapter") self.assertEqual(iqn, result) def test_get_volume_connector(self): vm_id = 'fake-vm' vm_ref = mock.MagicMock(value=vm_id) iqn = 'iscsi-name' url = 'test_url' self.flags(host_ip=url, group='vmware') with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref), mock.patch.object(self._volumeops, '_iscsi_get_host_iqn', return_value=iqn) ) as (fake_get_vm_ref, fake_iscsi_get_host_iqn): connector = self._volumeops.get_volume_connector(self._instance) fake_get_vm_ref.assert_called_once_with(self._volumeops._session, self._instance) fake_iscsi_get_host_iqn.assert_called_once_with(self._instance) self.assertEqual(url, connector['ip']) self.assertEqual(url, connector['host']) self.assertEqual(iqn, connector['initiator']) self.assertEqual(vm_id, connector['instance']) nova-13.1.4/nova/tests/unit/virt/vmwareapi/test_driver_api.py0000664000567000056710000030556413064447152025527 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suite for VMwareAPI. """ import collections import datetime from eventlet import greenthread import mock from mox3 import mox from oslo_config import cfg from oslo_utils import fixture as utils_fixture from oslo_utils import units from oslo_utils import uuidutils from oslo_vmware import exceptions as vexc from oslo_vmware.objects import datastore as ds_obj from oslo_vmware import pbm from oslo_vmware import vim_util as oslo_vim_util from nova import block_device from nova.compute import api as compute_api from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_states from nova import context from nova import exception from nova.image import glance from nova.network import model as network_model from nova import objects from nova import test from nova.tests.unit import fake_instance import nova.tests.unit.image.fake from nova.tests.unit import matchers from nova.tests.unit import test_flavors from nova.tests.unit import utils from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake from nova.tests.unit.virt.vmwareapi import stubs from nova.tests import uuidsentinel from nova.virt import driver as v_driver from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import imagecache from nova.virt.vmwareapi import images from nova.virt.vmwareapi import vif from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import vmops from nova.virt.vmwareapi import volumeops CONF = cfg.CONF CONF.import_opt('host', 'nova.netconf') CONF.import_opt('remove_unused_original_minimum_age_seconds', 'nova.virt.imagecache') def _fake_create_session(inst): session = vmwareapi_fake.DataObject() session.key = 'fake_key' session.userName = 'fake_username' session._pbm_wsdl_loc = None session._pbm = None inst._session = session class VMwareDriverStartupTestCase(test.NoDBTestCase): def _start_driver_with_flags(self, expected_exception_type, startup_flags): self.flags(**startup_flags) with mock.patch( 'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'): e = self.assertRaises( Exception, driver.VMwareVCDriver, None) # noqa self.assertIs(type(e), expected_exception_type) def test_start_driver_no_user(self): self._start_driver_with_flags( Exception, dict(host_ip='ip', host_password='password', group='vmware')) def test_start_driver_no_host(self): self._start_driver_with_flags( Exception, dict(host_username='username', host_password='password', group='vmware')) def test_start_driver_no_password(self): self._start_driver_with_flags( Exception, dict(host_ip='ip', host_username='username', group='vmware')) def test_start_driver_with_user_host_password(self): # Getting the InvalidInput exception signifies that no exception # is raised regarding missing user/password/host self._start_driver_with_flags( nova.exception.InvalidInput, dict(host_ip='ip', host_password='password', host_username="user", datastore_regex="bad(regex", group='vmware')) class 
VMwareSessionTestCase(test.NoDBTestCase): @mock.patch.object(driver.VMwareAPISession, '_is_vim_object', return_value=False) def test_call_method(self, mock_is_vim): with test.nested( mock.patch.object(driver.VMwareAPISession, '_create_session', _fake_create_session), mock.patch.object(driver.VMwareAPISession, 'invoke_api'), ) as (fake_create, fake_invoke): session = driver.VMwareAPISession() session._vim = mock.Mock() module = mock.Mock() session._call_method(module, 'fira') fake_invoke.assert_called_once_with(module, 'fira', session._vim) @mock.patch.object(driver.VMwareAPISession, '_is_vim_object', return_value=True) def test_call_method_vim(self, mock_is_vim): with test.nested( mock.patch.object(driver.VMwareAPISession, '_create_session', _fake_create_session), mock.patch.object(driver.VMwareAPISession, 'invoke_api'), ) as (fake_create, fake_invoke): session = driver.VMwareAPISession() module = mock.Mock() session._call_method(module, 'fira') fake_invoke.assert_called_once_with(module, 'fira') class VMwareAPIVMTestCase(test.NoDBTestCase): """Unit tests for Vmware API connection calls.""" REQUIRES_LOCKING = True @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension') def setUp(self, mock_register): super(VMwareAPIVMTestCase, self).setUp() ds_util.dc_cache_reset() vm_util.vm_refs_cache_reset() self.context = context.RequestContext('fake', 'fake', is_admin=False) self.flags(cluster_name='test_cluster', host_ip='test_url', host_username='test_username', host_password='test_pass', api_retry_count=1, use_linked_clone=False, group='vmware') self.flags(enabled=False, group='vnc') self.flags(image_cache_subdirectory_name='vmware_base', my_ip='') self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) stubs.set_stubs(self) vmwareapi_fake.reset() nova.tests.unit.image.fake.stub_out_image_service(self) self.conn = driver.VMwareVCDriver(None, False) self._set_exception_vars() self.node_name = self.conn._nodename self.ds = 'ds1' self._display_name = 'fake-display-name' self.vim = vmwareapi_fake.FakeVim() # NOTE(vish): none of the network plugging code is actually # being tested self.network_info = utils.get_test_network_info() image_ref = nova.tests.unit.image.fake.get_valid_image_id() (image_service, image_id) = glance.get_remote_image_service( self.context, image_ref) metadata = image_service.show(self.context, image_id) self.image = objects.ImageMeta.from_dict({ 'id': image_ref, 'disk_format': 'vmdk', 'size': int(metadata['size']), }) self.fake_image_uuid = self.image.id nova.tests.unit.image.fake.stub_out_image_service(self) self.vnc_host = 'ha-host' def tearDown(self): super(VMwareAPIVMTestCase, self).tearDown() vmwareapi_fake.cleanup() nova.tests.unit.image.fake.FakeImageService_reset() def test_legacy_block_device_info(self): self.assertFalse(self.conn.need_legacy_block_device_info) def test_get_host_ip_addr(self): self.assertEqual('test_url', self.conn.get_host_ip_addr()) def test_init_host_with_no_session(self): self.conn._session = mock.Mock() self.conn._session.vim = None self.conn.init_host('fake_host') self.conn._session._create_session.assert_called_once_with() def test_init_host(self): try: self.conn.init_host("fake_host") except Exception as ex: self.fail("init_host raised: %s" % ex) def _set_exception_vars(self): self.wait_task = self.conn._session._wait_for_task self.call_method = self.conn._session._call_method self.task_ref = None self.exception = False def test_cleanup_host(self): 
self.conn.init_host("fake_host") try: self.conn.cleanup_host("fake_host") except Exception as ex: self.fail("cleanup_host raised: %s" % ex) def test_driver_capabilities(self): self.assertTrue(self.conn.capabilities['has_imagecache']) self.assertFalse(self.conn.capabilities['supports_recreate']) self.assertTrue( self.conn.capabilities['supports_migrate_to_same_host']) @mock.patch.object(pbm, 'get_profile_id_by_name') def test_configuration_pbm(self, get_profile_mock): get_profile_mock.return_value = 'fake-profile' self.flags(pbm_enabled=True, pbm_default_policy='fake-policy', pbm_wsdl_location='fake-location', group='vmware') self.conn._validate_configuration() @mock.patch.object(pbm, 'get_profile_id_by_name') def test_configuration_pbm_bad_default(self, get_profile_mock): get_profile_mock.return_value = None self.flags(pbm_enabled=True, pbm_wsdl_location='fake-location', pbm_default_policy='fake-policy', group='vmware') self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist, self.conn._validate_configuration) def test_login_retries(self): self.attempts = 0 self.login_session = vmwareapi_fake.FakeVim()._login() def _fake_login(_self): self.attempts += 1 if self.attempts == 1: raise vexc.VimConnectionException('Here is my fake exception') return self.login_session def _fake_check_session(_self): return True self.stub_out('nova.tests.unit.virt.vmwareapi.fake.FakeVim._login', _fake_login) self.stub_out('nova.tests.unit.virt.vmwareapi.' 'fake.FakeVim._check_session', _fake_check_session) with mock.patch.object(greenthread, 'sleep'): self.conn = driver.VMwareAPISession() self.assertEqual(2, self.attempts) def _get_instance_type_by_name(self, type): for instance_type in test_flavors.DEFAULT_FLAVOR_OBJS: if instance_type.name == type: return instance_type if type == 'm1.micro': return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None, 'name': 'm1.micro', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '1', 'vcpu_weight': None, 'id': 2} def _create_instance(self, node=None, set_image_ref=True, uuid=None, instance_type='m1.large', ephemeral=None, instance_type_updates=None): if not node: node = self.node_name if not uuid: uuid = uuidutils.generate_uuid() self.type_data = dict(self._get_instance_type_by_name(instance_type)) if instance_type_updates: self.type_data.update(instance_type_updates) if ephemeral is not None: self.type_data['ephemeral_gb'] = ephemeral values = {'name': 'fake_name', 'display_name': self._display_name, 'id': 1, 'uuid': uuid, 'project_id': self.project_id, 'user_id': self.user_id, 'kernel_id': "fake_kernel_uuid", 'ramdisk_id': "fake_ramdisk_uuid", 'mac_address': "de:ad:be:ef:be:ef", 'flavor': objects.Flavor(**self.type_data), 'node': node, 'memory_mb': self.type_data['memory_mb'], 'root_gb': self.type_data['root_gb'], 'ephemeral_gb': self.type_data['ephemeral_gb'], 'vcpus': self.type_data['vcpus'], 'swap': self.type_data['swap'], 'expected_attrs': ['system_metadata'], } if set_image_ref: values['image_ref'] = self.fake_image_uuid self.instance_node = node self.uuid = uuid self.instance = fake_instance.fake_instance_obj( self.context, **values) def _create_vm(self, node=None, num_instances=1, uuid=None, instance_type='m1.large', powered_on=True, ephemeral=None, bdi=None, instance_type_updates=None): """Create and spawn the VM.""" if not node: node = self.node_name self._create_instance(node=node, uuid=uuid, instance_type=instance_type, 
ephemeral=ephemeral, instance_type_updates=instance_type_updates) self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid)) self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=bdi) self._check_vm_record(num_instances=num_instances, powered_on=powered_on, uuid=uuid) self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid)) def _get_vm_record(self): # Get record for VM vms = vmwareapi_fake._get_objects("VirtualMachine") for vm in vms.objects: if vm.get('name') == vm_util._get_vm_name(self._display_name, self.uuid): return vm self.fail('Unable to find VM backing!') def _get_info(self, uuid=None, node=None, name=None): uuid = uuid if uuid else self.uuid node = node if node else self.instance_node name = name if node else '1' return self.conn.get_info(fake_instance.fake_instance_obj( None, **{'uuid': uuid, 'name': name, 'node': node})) def _check_vm_record(self, num_instances=1, powered_on=True, uuid=None): """Check if the spawned VM's properties correspond to the instance in the db. """ instances = self.conn.list_instances() if uuidutils.is_uuid_like(uuid): self.assertEqual(num_instances, len(instances)) # Get Nova record for VM vm_info = self._get_info() vm = self._get_vm_record() # Check that m1.large above turned into the right thing. mem_kib = int(self.type_data['memory_mb']) << 10 vcpus = self.type_data['vcpus'] self.assertEqual(vm_info.max_mem_kb, mem_kib) self.assertEqual(vm_info.mem_kb, mem_kib) self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid) self.assertEqual(vm.get("summary.config.numCpu"), vcpus) self.assertEqual(vm.get("summary.config.memorySizeMB"), self.type_data['memory_mb']) self.assertEqual("ns0:VirtualE1000", vm.get("config.hardware.device").VirtualDevice[2].obj_name) if powered_on: # Check that the VM is running according to Nova self.assertEqual(power_state.RUNNING, vm_info.state) # Check that the VM is running according to vSphere API. self.assertEqual('poweredOn', vm.get("runtime.powerState")) else: # Check that the VM is not running according to Nova self.assertEqual(power_state.SHUTDOWN, vm_info.state) # Check that the VM is not running according to vSphere API. self.assertEqual('poweredOff', vm.get("runtime.powerState")) found_vm_uuid = False found_iface_id = False extras = vm.get("config.extraConfig") for c in extras.OptionValue: if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']): found_vm_uuid = True if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"): found_iface_id = True self.assertTrue(found_vm_uuid) self.assertTrue(found_iface_id) def _check_vm_info(self, info, pwr_state=power_state.RUNNING): """Check if the get_info returned values correspond to the instance object in the db. 
""" mem_kib = int(self.type_data['memory_mb']) << 10 self.assertEqual(info.state, pwr_state) self.assertEqual(info.max_mem_kb, mem_kib) self.assertEqual(info.mem_kb, mem_kib) self.assertEqual(info.num_cpu, self.type_data['vcpus']) def test_instance_exists(self): self._create_vm() self.assertTrue(self.conn.instance_exists(self.instance)) invalid_instance = fake_instance.fake_instance_obj( None, uuid=uuidsentinel.foo, name='bar', node=self.node_name) self.assertFalse(self.conn.instance_exists(invalid_instance)) def test_list_instances_1(self): self._create_vm() instances = self.conn.list_instances() self.assertEqual(1, len(instances)) def test_list_instance_uuids(self): self._create_vm() uuids = self.conn.list_instance_uuids() self.assertEqual(1, len(uuids)) def _cached_files_exist(self, exists=True): cache = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.vmdk' % self.fake_image_uuid) if exists: vmwareapi_fake.assertPathExists(self, str(cache)) else: vmwareapi_fake.assertPathNotExists(self, str(cache)) @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_instance_dir_disk_created(self, mock_from_image): """Test image file is cached when even when use_linked_clone is False """ img_props = images.VMwareImage( image_id=self.fake_image_uuid, linked_clone=False) mock_from_image.return_value = img_props self._create_vm() path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid) vmwareapi_fake.assertPathExists(self, str(path)) self._cached_files_exist() @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_cache_dir_disk_created(self, mock_from_image): """Test image disk is cached when use_linked_clone is True.""" self.flags(use_linked_clone=True, group='vmware') img_props = images.VMwareImage( image_id=self.fake_image_uuid, file_size=1 * units.Ki, disk_type=constants.DISK_TYPE_SPARSE) mock_from_image.return_value = img_props self._create_vm() path = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.vmdk' % self.fake_image_uuid) root = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.80.vmdk' % self.fake_image_uuid) vmwareapi_fake.assertPathExists(self, str(path)) vmwareapi_fake.assertPathExists(self, str(root)) def _iso_disk_type_created(self, instance_type='m1.large'): self.image.disk_format = 'iso' self._create_vm(instance_type=instance_type) path = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.iso' % self.fake_image_uuid) vmwareapi_fake.assertPathExists(self, str(path)) def test_iso_disk_type_created(self): self._iso_disk_type_created() path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid) vmwareapi_fake.assertPathExists(self, str(path)) def test_iso_disk_type_created_with_root_gb_0(self): self._iso_disk_type_created(instance_type='m1.micro') path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid) vmwareapi_fake.assertPathNotExists(self, str(path)) def test_iso_disk_cdrom_attach(self): iso_path = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.iso' % self.fake_image_uuid) def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): self.assertEqual(iso_uploaded_path, str(iso_path)) self.stub_out('nova.virt.vmwareapi.vmops._attach_cdrom_to_vm', fake_attach_cdrom) self.image.disk_format = 'iso' self._create_vm() @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_iso_disk_cdrom_attach_with_config_drive(self, mock_from_image): img_props = 
images.VMwareImage( image_id=self.fake_image_uuid, file_size=80 * units.Gi, file_type='iso', linked_clone=False) mock_from_image.return_value = img_props self.flags(force_config_drive=True) iso_path = [ ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.iso' % self.fake_image_uuid), ds_obj.DatastorePath(self.ds, 'fake-config-drive')] self.iso_index = 0 def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index])) self.iso_index += 1 with test.nested( mock.patch.object(self.conn._vmops, '_attach_cdrom_to_vm', side_effect=fake_attach_cdrom), mock.patch.object(self.conn._vmops, '_create_config_drive', return_value='fake-config-drive'), ) as (fake_attach_cdrom_to_vm, fake_create_config_drive): self.image.disk_format = 'iso' self._create_vm() self.assertEqual(2, self.iso_index) self.assertEqual(fake_attach_cdrom_to_vm.call_count, 2) self.assertEqual(fake_create_config_drive.call_count, 1) def test_ephemeral_disk_attach(self): self._create_vm(ephemeral=50) path = ds_obj.DatastorePath(self.ds, self.uuid, 'ephemeral_0.vmdk') vmwareapi_fake.assertPathExists(self, str(path)) def test_ephemeral_disk_attach_from_bdi(self): ephemerals = [{'device_type': 'disk', 'disk_bus': constants.DEFAULT_ADAPTER_TYPE, 'size': 25}, {'device_type': 'disk', 'disk_bus': constants.DEFAULT_ADAPTER_TYPE, 'size': 25}] bdi = {'ephemerals': ephemerals} self._create_vm(bdi=bdi, ephemeral=50) path = ds_obj.DatastorePath(self.ds, self.uuid, 'ephemeral_0.vmdk') vmwareapi_fake.assertPathExists(self, str(path)) path = ds_obj.DatastorePath(self.ds, self.uuid, 'ephemeral_1.vmdk') vmwareapi_fake.assertPathExists(self, str(path)) def test_ephemeral_disk_attach_from_bdii_with_no_ephs(self): bdi = {'ephemerals': []} self._create_vm(bdi=bdi, ephemeral=50) path = ds_obj.DatastorePath(self.ds, self.uuid, 'ephemeral_0.vmdk') vmwareapi_fake.assertPathExists(self, str(path)) def test_cdrom_attach_with_config_drive(self): self.flags(force_config_drive=True) iso_path = ds_obj.DatastorePath(self.ds, 'fake-config-drive') self.cd_attach_called = False def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): self.assertEqual(iso_uploaded_path, str(iso_path)) self.cd_attach_called = True with test.nested( mock.patch.object(self.conn._vmops, '_attach_cdrom_to_vm', side_effect=fake_attach_cdrom), mock.patch.object(self.conn._vmops, '_create_config_drive', return_value='fake-config-drive'), ) as (fake_attach_cdrom_to_vm, fake_create_config_drive): self._create_vm() self.assertTrue(self.cd_attach_called) @mock.patch.object(vmops.VMwareVMOps, 'power_off') @mock.patch.object(driver.VMwareVCDriver, 'detach_volume') @mock.patch.object(vmops.VMwareVMOps, 'destroy') def test_destroy_with_attached_volumes(self, mock_destroy, mock_detach_volume, mock_power_off): self._create_vm() connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'} bdm = [{'connection_info': connection_info, 'disk_bus': 'fake-bus', 'device_name': 'fake-name', 'mount_device': '/dev/sdb'}] bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'} self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state) self.conn.destroy(self.context, self.instance, self.network_info, block_device_info=bdi) mock_power_off.assert_called_once_with(self.instance) mock_detach_volume.assert_called_once_with( connection_info, self.instance, 'fake-name') mock_destroy.assert_called_once_with(self.instance, True) @mock.patch.object(vmops.VMwareVMOps, 'power_off', 
side_effect=vexc.ManagedObjectNotFoundException()) @mock.patch.object(vmops.VMwareVMOps, 'destroy') def test_destroy_with_attached_volumes_missing(self, mock_destroy, mock_power_off): self._create_vm() connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'} bdm = [{'connection_info': connection_info, 'disk_bus': 'fake-bus', 'device_name': 'fake-name', 'mount_device': '/dev/sdb'}] bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'} self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state) self.conn.destroy(self.context, self.instance, self.network_info, block_device_info=bdi) mock_power_off.assert_called_once_with(self.instance) mock_destroy.assert_called_once_with(self.instance, True) @mock.patch.object(driver.VMwareVCDriver, 'detach_volume', side_effect=exception.NovaException()) @mock.patch.object(vmops.VMwareVMOps, 'destroy') def test_destroy_with_attached_volumes_with_exception( self, mock_destroy, mock_detach_volume): self._create_vm() connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'} bdm = [{'connection_info': connection_info, 'disk_bus': 'fake-bus', 'device_name': 'fake-name', 'mount_device': '/dev/sdb'}] bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'} self.assertRaises(exception.NovaException, self.conn.destroy, self.context, self.instance, self.network_info, block_device_info=bdi) mock_detach_volume.assert_called_once_with( connection_info, self.instance, 'fake-name') self.assertFalse(mock_destroy.called) @mock.patch.object(driver.VMwareVCDriver, 'detach_volume', side_effect=exception.DiskNotFound(message='oh man')) @mock.patch.object(vmops.VMwareVMOps, 'destroy') def test_destroy_with_attached_volumes_with_disk_not_found( self, mock_destroy, mock_detach_volume): self._create_vm() connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'} bdm = [{'connection_info': connection_info, 'disk_bus': 'fake-bus', 'device_name': 'fake-name', 'mount_device': '/dev/sdb'}] bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'} self.conn.destroy(self.context, self.instance, self.network_info, block_device_info=bdi) mock_detach_volume.assert_called_once_with( connection_info, self.instance, 'fake-name') self.assertTrue(mock_destroy.called) mock_destroy.assert_called_once_with(self.instance, True) def test_spawn(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_spawn_vm_ref_cached(self): uuid = uuidutils.generate_uuid() self.assertIsNone(vm_util.vm_ref_cache_get(uuid)) self._create_vm(uuid=uuid) self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid)) def test_spawn_power_on(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_spawn_root_size_0(self): self._create_vm(instance_type='m1.micro') info = self._get_info() self._check_vm_info(info, power_state.RUNNING) cache = ('[%s] vmware_base/%s/%s.vmdk' % (self.ds, self.fake_image_uuid, self.fake_image_uuid)) gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' % (self.ds, self.fake_image_uuid, self.fake_image_uuid)) vmwareapi_fake.assertPathExists(self, cache) vmwareapi_fake.assertPathNotExists(self, gb_cache) def _spawn_with_delete_exception(self, fault=None): def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "DeleteDatastoreFile_Task": self.exception = True task_mdo = vmwareapi_fake.create_task(method, "error", error_fault=fault) return task_mdo.obj return task_ref with ( 
mock.patch.object(self.conn._session, '_call_method', fake_call_method) ): if fault: self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) else: self.assertRaises(vexc.VMwareDriverException, self._create_vm) self.assertTrue(self.exception) def test_spawn_with_delete_exception_not_found(self): self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound()) def test_spawn_with_delete_exception_file_fault(self): self._spawn_with_delete_exception(vmwareapi_fake.FileFault()) def test_spawn_with_delete_exception_cannot_delete_file(self): self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile()) def test_spawn_with_delete_exception_file_locked(self): self._spawn_with_delete_exception(vmwareapi_fake.FileLocked()) def test_spawn_with_delete_exception_general(self): self._spawn_with_delete_exception() def test_spawn_disk_extend(self): self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk') requested_size = 80 * units.Mi self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(), requested_size, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_spawn_disk_extend_exists(self): root = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.80.vmdk' % self.fake_image_uuid) def _fake_extend(instance, requested_size, name, dc_ref): vmwareapi_fake._add_file(str(root)) with test.nested( mock.patch.object(self.conn._vmops, '_extend_virtual_disk', side_effect=_fake_extend) ) as (fake_extend_virtual_disk): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) vmwareapi_fake.assertPathExists(self, str(root)) self.assertEqual(1, fake_extend_virtual_disk[0].call_count) @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_disk_extend_sparse(self, mock_from_image): img_props = images.VMwareImage( image_id=self.fake_image_uuid, file_size=units.Ki, disk_type=constants.DISK_TYPE_SPARSE, linked_clone=True) mock_from_image.return_value = img_props with test.nested( mock.patch.object(self.conn._vmops, '_extend_virtual_disk'), mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'), ) as (mock_extend, mock_get_dc): dc_val = mock.Mock() dc_val.ref = "fake_dc_ref" dc_val.name = "dc1" mock_get_dc.return_value = dc_val self._create_vm() iid = img_props.image_id cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base', iid, '%s.80.vmdk' % iid) mock_extend.assert_called_once_with( self.instance, self.instance.root_gb * units.Mi, str(cached_image), "fake_dc_ref") def test_spawn_disk_extend_failed_copy(self): # Spawn instance # copy for extend fails without creating a file # # Expect the copy error to be raised self.flags(use_linked_clone=True, group='vmware') CopyError = vexc.FileFaultException def fake_wait_for_task(task_ref): if task_ref == 'fake-copy-task': raise CopyError('Copy failed!') return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): if method == "CopyVirtualDisk_Task": return 'fake-copy-task' return self.call_method(module, method, *args, **kwargs) with test.nested( mock.patch.object(self.conn._session, '_call_method', new=fake_call_method), mock.patch.object(self.conn._session, '_wait_for_task', new=fake_wait_for_task)): self.assertRaises(CopyError, self._create_vm) def test_spawn_disk_extend_failed_partial_copy(self): # Spawn instance # Copy for extend fails, leaving a file behind # # Expect the file to be cleaned up # Expect the 
copy error to be raised self.flags(use_linked_clone=True, group='vmware') self.task_ref = None uuid = self.fake_image_uuid cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds, uuid, uuid) CopyError = vexc.FileFaultException def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None vmwareapi_fake.assertPathExists(self, cached_image) # N.B. We don't test for -flat here because real # CopyVirtualDisk_Task doesn't actually create it raise CopyError('Copy failed!') return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "CopyVirtualDisk_Task": self.task_ref = task_ref return task_ref with test.nested( mock.patch.object(self.conn._session, '_call_method', new=fake_call_method), mock.patch.object(self.conn._session, '_wait_for_task', new=fake_wait_for_task)): self.assertRaises(CopyError, self._create_vm) vmwareapi_fake.assertPathNotExists(self, cached_image) def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self): # Spawn instance # Copy for extend fails, leaves file behind # File cleanup fails # # Expect file to be left behind # Expect file cleanup error to be raised self.flags(use_linked_clone=True, group='vmware') self.task_ref = None uuid = self.fake_image_uuid cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds, uuid, uuid) CopyError = vexc.FileFaultException DeleteError = vexc.CannotDeleteFileException def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None vmwareapi_fake.assertPathExists(self, cached_image) # N.B. We don't test for -flat here because real # CopyVirtualDisk_Task doesn't actually create it raise CopyError('Copy failed!') elif task_ref == 'fake-delete-task': raise DeleteError('Delete failed!') return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): if method == "DeleteDatastoreFile_Task": return 'fake-delete-task' task_ref = self.call_method(module, method, *args, **kwargs) if method == "CopyVirtualDisk_Task": self.task_ref = task_ref return task_ref with test.nested( mock.patch.object(self.conn._session, '_wait_for_task', new=fake_wait_for_task), mock.patch.object(self.conn._session, '_call_method', new=fake_call_method)): self.assertRaises(DeleteError, self._create_vm) vmwareapi_fake.assertPathExists(self, cached_image) @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_disk_invalid_disk_size(self, mock_from_image): img_props = images.VMwareImage( image_id=self.fake_image_uuid, file_size=82 * units.Gi, disk_type=constants.DISK_TYPE_SPARSE, linked_clone=True) mock_from_image.return_value = img_props self.assertRaises(exception.InstanceUnacceptable, self._create_vm) @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image): img_props = images.VMwareImage( image_id=self.fake_image_uuid, file_size=1024, disk_type=constants.DISK_TYPE_SPARSE, linked_clone=True) mock_from_image.return_value = img_props cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.80.vmdk' % self.fake_image_uuid) tmp_file = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.80-flat.vmdk' % self.fake_image_uuid) NoDiskSpace = vexc.get_fault_class('NoDiskSpace') def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None raise NoDiskSpace() return self.wait_task(task_ref) def 
fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == 'ExtendVirtualDisk_Task': self.task_ref = task_ref return task_ref with test.nested( mock.patch.object(self.conn._session, '_wait_for_task', fake_wait_for_task), mock.patch.object(self.conn._session, '_call_method', fake_call_method) ) as (mock_wait_for_task, mock_call_method): self.assertRaises(NoDiskSpace, self._create_vm) vmwareapi_fake.assertPathNotExists(self, str(cached_image)) vmwareapi_fake.assertPathNotExists(self, str(tmp_file)) def test_spawn_with_move_file_exists_exception(self): # The test will validate that the spawn completes # successfully. The "MoveDatastoreFile_Task" will # raise an file exists exception. The flag # self.exception will be checked to see that # the exception has indeed been raised. def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None self.exception = True raise vexc.FileAlreadyExistsException() return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": self.task_ref = task_ref return task_ref with test.nested( mock.patch.object(self.conn._session, '_wait_for_task', fake_wait_for_task), mock.patch.object(self.conn._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.assertTrue(self.exception) def test_spawn_with_move_general_exception(self): # The test will validate that the spawn completes # successfully. The "MoveDatastoreFile_Task" will # raise a general exception. The flag self.exception # will be checked to see that the exception has # indeed been raised. def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None self.exception = True raise vexc.VMwareDriverException('Exception!') return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": self.task_ref = task_ref return task_ref with test.nested( mock.patch.object(self.conn._session, '_wait_for_task', fake_wait_for_task), mock.patch.object(self.conn._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): self.assertRaises(vexc.VMwareDriverException, self._create_vm) self.assertTrue(self.exception) def test_spawn_with_move_poll_exception(self): self.call_method = self.conn._session._call_method def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": task_mdo = vmwareapi_fake.create_task(method, "error") return task_mdo.obj return task_ref with ( mock.patch.object(self.conn._session, '_call_method', fake_call_method) ): self.assertRaises(vexc.VMwareDriverException, self._create_vm) def test_spawn_with_move_file_exists_poll_exception(self): # The test will validate that the spawn completes # successfully. The "MoveDatastoreFile_Task" will # raise a file exists exception. The flag self.exception # will be checked to see that the exception has # indeed been raised. 
def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": self.exception = True task_mdo = vmwareapi_fake.create_task(method, "error", error_fault=vmwareapi_fake.FileAlreadyExists()) return task_mdo.obj return task_ref with ( mock.patch.object(self.conn._session, '_call_method', fake_call_method) ): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.assertTrue(self.exception) def _spawn_attach_volume_vmdk(self, set_image_ref=True): self._create_instance(set_image_ref=set_image_ref) self.mox.StubOutWithMock(block_device, 'volume_in_mapping') self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping') connection_info = self._test_vmdk_connection_info('vmdk') root_disk = [{'connection_info': connection_info, 'boot_index': 0}] v_driver.block_device_info_get_mapping( mox.IgnoreArg()).AndReturn(root_disk) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_get_res_pool_of_vm') volumeops.VMwareVolumeOps._get_res_pool_of_vm( mox.IgnoreArg()).AndReturn('fake_res_pool') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume') volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(), 'fake_res_pool', mox.IgnoreArg()) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_volume') volumeops.VMwareVolumeOps.attach_volume(connection_info, self.instance, constants.DEFAULT_ADAPTER_TYPE) self.mox.ReplayAll() block_device_info = {'block_device_mapping': root_disk} self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=block_device_info) def test_spawn_attach_volume_iscsi(self): self._create_instance() self.mox.StubOutWithMock(block_device, 'volume_in_mapping') self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping') connection_info = self._test_vmdk_connection_info('iscsi') root_disk = [{'connection_info': connection_info, 'boot_index': 0}] v_driver.block_device_info_get_mapping( mox.IgnoreArg()).AndReturn(root_disk) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_volume') volumeops.VMwareVolumeOps.attach_volume(connection_info, self.instance, constants.DEFAULT_ADAPTER_TYPE) self.mox.ReplayAll() block_device_info = {'mount_device': 'vda'} self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=block_device_info) def test_spawn_hw_versions(self): updates = {'extra_specs': {'vmware:hw_version': 'vmx-08'}} self._create_vm(instance_type_updates=updates) vm = self._get_vm_record() version = vm.get("version") self.assertEqual('vmx-08', version) def mock_upload_image(self, context, image, instance, session, **kwargs): self.assertEqual('Test-Snapshot', image) self.assertEqual(self.instance, instance) self.assertEqual(1024, kwargs['vmdk_size']) def test_get_vm_ref_using_extra_config(self): self._create_vm() vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session, self.instance['uuid']) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') # Disrupt the fake Virtual Machine object so that extraConfig # cannot be matched. fake_vm = self._get_vm_record() fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "" # We should not get a Virtual Machine through extraConfig. 
vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session, self.instance['uuid']) self.assertIsNone(vm_ref, 'VM Reference should be none') # Check if we can find the Virtual Machine using the name. vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') def test_search_vm_ref_by_identifier(self): self._create_vm() vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session, self.instance['uuid']) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') fake_vm = self._get_vm_record() fake_vm.set("summary.config.instanceUuid", "foo") fake_vm.set("name", "foo") fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo" self.assertIsNone(vm_util.search_vm_ref_by_identifier( self.conn._session, self.instance['uuid']), "VM Reference should be none") self.assertIsNotNone( vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"), "VM Reference should not be none") def test_get_object_for_optionvalue(self): self._create_vm() vms = self.conn._session._call_method(vim_util, "get_objects", "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]']) vm_ref = vm_util._get_object_for_optionvalue(vms, self.instance["uuid"]) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') def _test_snapshot(self): expected_calls = [ {'args': (), 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}}, {'args': (), 'kwargs': {'task_state': task_states.IMAGE_UPLOADING, 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}] func_call_matcher = matchers.FunctionCallMatcher(expected_calls) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) with mock.patch.object(images, 'upload_image_stream_optimized', self.mock_upload_image): self.conn.snapshot(self.context, self.instance, "Test-Snapshot", func_call_matcher.call) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.assertIsNone(func_call_matcher.match()) def test_snapshot(self): self._create_vm() self._test_snapshot() def test_snapshot_no_root_disk(self): self._iso_disk_type_created(instance_type='m1.micro') self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot, self.context, self.instance, "Test-Snapshot", lambda *args, **kwargs: None) def test_snapshot_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.snapshot, self.context, self.instance, "Test-Snapshot", lambda *args, **kwargs: None) def test_snapshot_delete_vm_snapshot(self): self._create_vm() fake_vm = self._get_vm_record() snapshot_ref = vmwareapi_fake.ManagedObjectReference( value="Snapshot-123", name="VirtualMachineSnapshot") self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_vm_snapshot') self.conn._vmops._create_vm_snapshot( self.instance, fake_vm.obj).AndReturn(snapshot_ref) self.mox.StubOutWithMock(vmops.VMwareVMOps, '_delete_vm_snapshot') self.conn._vmops._delete_vm_snapshot( self.instance, fake_vm.obj, snapshot_ref).AndReturn(None) self.mox.ReplayAll() self._test_snapshot() def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1): self._create_vm() fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj snapshot_ref = vmwareapi_fake.ManagedObjectReference( value="Snapshot-123", name="VirtualMachineSnapshot") with test.nested( mock.patch.object(self.conn._session, '_wait_for_task', side_effect=exception), mock.patch.object(vmops, '_time_sleep_wrapper') ) as (_fake_wait, _fake_sleep): if exception != vexc.TaskInProgress: self.assertRaises(exception, 
self.conn._vmops._delete_vm_snapshot, self.instance, fake_vm, snapshot_ref) self.assertEqual(0, _fake_sleep.call_count) else: self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm, snapshot_ref) self.assertEqual(call_count - 1, _fake_sleep.call_count) self.assertEqual(call_count, _fake_wait.call_count) def test_snapshot_delete_vm_snapshot_exception(self): self._snapshot_delete_vm_snapshot_exception(exception.NovaException) def test_snapshot_delete_vm_snapshot_exception_retry(self): self.flags(api_retry_count=5, group='vmware') self._snapshot_delete_vm_snapshot_exception(vexc.TaskInProgress, 5) def test_reboot(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) reboot_type = "SOFT" self.conn.reboot(self.context, self.instance, self.network_info, reboot_type) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_reboot_hard(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) reboot_type = "HARD" self.conn.reboot(self.context, self.instance, self.network_info, reboot_type) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_reboot_with_uuid(self): """Test fall back to use name when can't find by uuid.""" self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) reboot_type = "SOFT" self.conn.reboot(self.context, self.instance, self.network_info, reboot_type) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_reboot_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.reboot, self.context, self.instance, self.network_info, 'SOFT') def test_poll_rebooting_instances(self): self.mox.StubOutWithMock(compute_api.API, 'reboot') compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() instances = [self.instance] self.conn.poll_rebooting_instances(60, instances) def test_reboot_not_poweredon(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.context, self.instance) info = self._get_info() self._check_vm_info(info, power_state.SUSPENDED) self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot, self.context, self.instance, self.network_info, 'SOFT') def test_suspend(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.context, self.instance) info = self._get_info() self._check_vm_info(info, power_state.SUSPENDED) def test_suspend_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.suspend, self.context, self.instance) def test_resume(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.context, self.instance) info = self._get_info() self._check_vm_info(info, power_state.SUSPENDED) self.conn.resume(self.context, self.instance, self.network_info) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_resume_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.resume, self.context, self.instance, self.network_info) def test_resume_not_suspended(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.assertRaises(exception.InstanceResumeFailure, self.conn.resume, self.context, self.instance, self.network_info) def 
test_power_on(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.conn.power_off(self.instance) info = self._get_info() self._check_vm_info(info, power_state.SHUTDOWN) self.conn.power_on(self.context, self.instance, self.network_info) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_power_on_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.power_on, self.context, self.instance, self.network_info) def test_power_off(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.conn.power_off(self.instance) info = self._get_info() self._check_vm_info(info, power_state.SHUTDOWN) def test_power_off_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.power_off, self.instance) @mock.patch.object(driver.VMwareVCDriver, 'reboot') @mock.patch.object(vm_util, 'get_vm_state', return_value='poweredOff') def test_resume_state_on_host_boot(self, mock_get_vm_state, mock_reboot): self._create_instance() self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') mock_get_vm_state.assert_called_once_with(self.conn._session, self.instance) mock_reboot.assert_called_once_with(self.context, self.instance, 'network_info', 'hard', None) def test_resume_state_on_host_boot_no_reboot(self): self._create_instance() for state in ['poweredOn', 'suspended']: with test.nested( mock.patch.object(driver.VMwareVCDriver, 'reboot'), mock.patch.object(vm_util, 'get_vm_state', return_value=state) ) as (mock_reboot, mock_get_vm_state): self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') mock_get_vm_state.assert_called_once_with(self.conn._session, self.instance) self.assertFalse(mock_reboot.called) @mock.patch('nova.virt.driver.block_device_info_get_mapping') @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.detach_volume') def test_detach_instance_volumes( self, detach_volume, block_device_info_get_mapping): self._create_vm() def _mock_bdm(connection_info, device_name): return {'connection_info': connection_info, 'device_name': device_name} disk_1 = _mock_bdm(mock.sentinel.connection_info_1, 'dev1') disk_2 = _mock_bdm(mock.sentinel.connection_info_2, 'dev2') block_device_info_get_mapping.return_value = [disk_1, disk_2] detach_volume.side_effect = [None, exception.DiskNotFound("Error")] with mock.patch.object(self.conn, '_vmops') as vmops: block_device_info = mock.sentinel.block_device_info self.conn._detach_instance_volumes(self.instance, block_device_info) block_device_info_get_mapping.assert_called_once_with( block_device_info) vmops.power_off.assert_called_once_with(self.instance) exp_detach_calls = [mock.call(mock.sentinel.connection_info_1, self.instance, 'dev1'), mock.call(mock.sentinel.connection_info_2, self.instance, 'dev2')] self.assertEqual(exp_detach_calls, detach_volume.call_args_list) def test_destroy(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) instances = self.conn.list_instances() self.assertEqual(1, len(instances)) self.conn.destroy(self.context, self.instance, self.network_info) instances = self.conn.list_instances() self.assertEqual(0, len(instances)) self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid)) def test_destroy_no_datastore(self): self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) instances = self.conn.list_instances() self.assertEqual(1, 
len(instances)) # Delete the vmPathName vm = self._get_vm_record() vm.delete('config.files.vmPathName') self.conn.destroy(self.context, self.instance, self.network_info) instances = self.conn.list_instances() self.assertEqual(0, len(instances)) def test_destroy_non_existent(self): self.destroy_disks = True with mock.patch.object(self.conn._vmops, "destroy") as mock_destroy: self._create_instance() self.conn.destroy(self.context, self.instance, self.network_info, None, self.destroy_disks) mock_destroy.assert_called_once_with(self.instance, self.destroy_disks) def test_destroy_instance_without_compute(self): instance = fake_instance.fake_instance_obj(None) self.destroy_disks = True with mock.patch.object(self.conn._vmops, "destroy") as mock_destroy: self.conn.destroy(self.context, instance, self.network_info, None, self.destroy_disks) self.assertFalse(mock_destroy.called) def _destroy_instance_without_vm_ref(self, task_state=None): def fake_vm_ref_from_name(session, vm_name): return 'fake-ref' self._create_instance() with test.nested( mock.patch.object(vm_util, 'get_vm_ref_from_name', fake_vm_ref_from_name), mock.patch.object(self.conn._session, '_call_method'), mock.patch.object(self.conn._vmops, '_destroy_instance') ) as (mock_get, mock_call, mock_destroy): self.instance.task_state = task_state self.conn.destroy(self.context, self.instance, self.network_info, None, True) if task_state == task_states.RESIZE_REVERTING: expected = 0 else: expected = 1 self.assertEqual(expected, mock_destroy.call_count) self.assertFalse(mock_call.called) def test_destroy_instance_without_vm_ref(self): self._destroy_instance_without_vm_ref() def test_destroy_instance_without_vm_ref_with_resize_revert(self): self._destroy_instance_without_vm_ref( task_state=task_states.RESIZE_REVERTING) def _rescue(self, config_drive=False): # validate that the power on is only called once self._power_on = vm_util.power_on_instance self._power_on_called = 0 def fake_attach_disk_to_vm(vm_ref, instance, adapter_type, disk_type, vmdk_path=None, disk_size=None, linked_clone=False, controller_key=None, unit_number=None, device_name=None): info = self.conn.get_info(instance) self._check_vm_info(info, power_state.SHUTDOWN) if config_drive: def fake_create_config_drive(instance, injected_files, password, network_info, data_store_name, folder, instance_uuid, cookies): self.assertTrue(uuidutils.is_uuid_like(instance['uuid'])) return str(ds_obj.DatastorePath(data_store_name, instance_uuid, 'fake.iso')) self.stub_out('nova.virt.vmwareapi.vmops._create_config_drive', fake_create_config_drive) self._create_vm() def fake_power_on_instance(session, instance, vm_ref=None): self._power_on_called += 1 return self._power_on(session, instance, vm_ref=vm_ref) info = self._get_info() self._check_vm_info(info, power_state.RUNNING) self.stub_out('nova.virt.vmwareapi.vm_util.power_on_instance', fake_power_on_instance) self.stub_out('nova.virt.vmwareapi.volumeops.' 
'VMwareVolumeOps.attach_disk_to_vm', fake_attach_disk_to_vm) self.conn.rescue(self.context, self.instance, self.network_info, self.image, 'fake-password') info = self.conn.get_info({'name': '1', 'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) info = self.conn.get_info({'name': '1-orig', 'uuid': '%s-orig' % self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SHUTDOWN) self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid)) self.assertEqual(1, self._power_on_called) def test_get_diagnostics(self): self._create_vm() expected = {'memoryReservation': 0, 'suspendInterval': 0, 'maxCpuUsage': 2000, 'toolsInstallerMounted': False, 'consumedOverheadMemory': 20, 'numEthernetCards': 1, 'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}], 'memoryOverhead': 21417984, 'guestMemoryUsage': 0, 'connectionState': 'connected', 'memorySizeMB': 512, 'balloonedMemory': 0, 'vmPathName': 'fake_path', 'template': False, 'overallCpuUsage': 0, 'powerState': 'poweredOn', 'cpuReservation': 0, 'overallCpuDemand': 0, 'numVirtualDisks': 1, 'hostMemoryUsage': 141} expected = {'vmware:' + k: v for k, v in expected.items()} instance = fake_instance.fake_instance_obj(None, name=1, uuid=self.uuid, node=self.instance_node) self.assertThat( self.conn.get_diagnostics(instance), matchers.DictMatches(expected)) def test_get_instance_diagnostics(self): self._create_vm() expected = {'uptime': 0, 'memory_details': {'used': 0, 'maximum': 512}, 'nic_details': [], 'driver': 'vmwareapi', 'state': 'running', 'version': '1.0', 'cpu_details': [], 'disk_details': [], 'hypervisor_os': 'esxi', 'config_drive': 'False'} instance = objects.Instance(uuid=self.uuid, config_drive=False, system_metadata={}, node=self.instance_node) actual = self.conn.get_instance_diagnostics(instance) self.assertThat(actual.serialize(), matchers.DictMatches(expected)) def test_get_console_output(self): self.assertRaises(NotImplementedError, self.conn.get_console_output, None, None) def test_get_vnc_console_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.get_vnc_console, self.context, self.instance) def _test_get_vnc_console(self): self._create_vm() fake_vm = self._get_vm_record() OptionValue = collections.namedtuple('OptionValue', ['key', 'value']) opt_val = OptionValue(key='', value=5906) fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val) vnc_console = self.conn.get_vnc_console(self.context, self.instance) self.assertEqual(self.vnc_host, vnc_console.host) self.assertEqual(5906, vnc_console.port) def test_get_vnc_console(self): self._test_get_vnc_console() def test_get_vnc_console_noport(self): self._create_vm() self.assertRaises(exception.ConsoleTypeUnavailable, self.conn.get_vnc_console, self.context, self.instance) def test_get_volume_connector(self): self._create_vm() connector_dict = self.conn.get_volume_connector(self.instance) fake_vm = self._get_vm_record() fake_vm_id = fake_vm.obj.value self.assertEqual('test_url', connector_dict['ip']) self.assertEqual('iscsi-name', connector_dict['initiator']) self.assertEqual('test_url', connector_dict['host']) self.assertEqual(fake_vm_id, connector_dict['instance']) def _test_vmdk_connection_info(self, type): return {'driver_volume_type': type, 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} def test_volume_attach_vmdk(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' 
self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_attach_volume_vmdk') volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info, self.instance, None) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_volume_detach_vmdk(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_detach_volume_vmdk') volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info, self.instance) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_attach_vmdk_disk_to_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') adapter_type = constants.DEFAULT_ADAPTER_TYPE disk_type = constants.DEFAULT_DISK_TYPE disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811' backing = mock.Mock(uuid=disk_uuid) device = mock.Mock(backing=backing) vmdk_info = vm_util.VmdkInfo('fake-path', adapter_type, disk_type, 64, device) with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=mock.sentinel.vm_ref), mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_ref'), mock.patch.object(vm_util, 'get_vmdk_info', return_value=vmdk_info), mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm'), mock.patch.object(volumeops.VMwareVolumeOps, '_update_volume_details') ) as (get_vm_ref, get_volume_ref, get_vmdk_info, attach_disk_to_vm, update_volume_details): self.conn.attach_volume(None, connection_info, self.instance, '/dev/vdc') get_vm_ref.assert_called_once_with(self.conn._session, self.instance) get_volume_ref.assert_called_once_with( connection_info['data']['volume']) self.assertTrue(get_vmdk_info.called) attach_disk_to_vm.assert_called_once_with(mock.sentinel.vm_ref, self.instance, adapter_type, disk_type, vmdk_path='fake-path') update_volume_details.assert_called_once_with( mock.sentinel.vm_ref, connection_info['data']['volume_id'], disk_uuid) def test_detach_vmdk_disk_from_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') with mock.patch.object(volumeops.VMwareVolumeOps, 'detach_volume') as detach_volume: self.conn.detach_volume(connection_info, self.instance, '/dev/vdc', encryption=None) detach_volume.assert_called_once_with(connection_info, self.instance) def test_volume_attach_iscsi(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_attach_volume_iscsi') volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info, self.instance, None) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_volume_detach_iscsi(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_detach_volume_iscsi') volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info, self.instance) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_attach_iscsi_disk_to_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') connection_info['data']['target_portal'] = 'fake_target_host:port' connection_info['data']['target_iqn'] = 'fake_target_iqn' mount_point = '/dev/vdc' discover = ('fake_name', 'fake_uuid') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_iscsi_get_target') # 
simulate target not found volumeops.VMwareVolumeOps._iscsi_get_target( connection_info['data']).AndReturn((None, None)) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_iscsi_add_send_target_host') # rescan gets called with target portal self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_iscsi_rescan_hba') volumeops.VMwareVolumeOps._iscsi_rescan_hba( connection_info['data']['target_portal']) # simulate target found volumeops.VMwareVolumeOps._iscsi_get_target( connection_info['data']).AndReturn(discover) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_disk_to_vm') volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), 'rdmp', device_name=mox.IgnoreArg()) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_iscsi_rescan_hba(self): fake_target_portal = 'fake_target_host:port' host_storage_sys = vmwareapi_fake._get_objects( "HostStorageSystem").objects[0] iscsi_hba_array = host_storage_sys.get('storageDeviceInfo' '.hostBusAdapter') iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0] # Check the host system does not have the send target self.assertRaises(AttributeError, getattr, iscsi_hba, 'configuredSendTarget') # Rescan HBA with the target portal vops = volumeops.VMwareVolumeOps(self.conn._session) vops._iscsi_rescan_hba(fake_target_portal) # Check if HBA has the target portal configured self.assertEqual('fake_target_host', iscsi_hba.configuredSendTarget[0].address) # Rescan HBA with same portal vops._iscsi_rescan_hba(fake_target_portal) self.assertEqual(1, len(iscsi_hba.configuredSendTarget)) def test_iscsi_get_target(self): data = {'target_portal': 'fake_target_host:port', 'target_iqn': 'fake_target_iqn'} host = vmwareapi_fake._get_objects('HostSystem').objects[0] host._add_iscsi_target(data) vops = volumeops.VMwareVolumeOps(self.conn._session) result = vops._iscsi_get_target(data) self.assertEqual(('fake-device', 'fake-uuid'), result) def test_detach_iscsi_disk_from_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') connection_info['data']['target_portal'] = 'fake_target_portal' connection_info['data']['target_iqn'] = 'fake_target_iqn' mount_point = '/dev/vdc' find = ('fake_name', 'fake_uuid') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_iscsi_get_target') volumeops.VMwareVolumeOps._iscsi_get_target( connection_info['data']).AndReturn(find) self.mox.StubOutWithMock(vm_util, 'get_rdm_disk') device = 'fake_device' vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(), self.instance, device, destroy_disk=True) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_connection_info_get(self): self._create_vm() connector = self.conn.get_volume_connector(self.instance) self.assertEqual('test_url', connector['ip']) self.assertEqual('test_url', connector['host']) self.assertEqual('iscsi-name', connector['initiator']) self.assertIn('instance', connector) def test_connection_info_get_after_destroy(self): self._create_vm() self.conn.destroy(self.context, self.instance, self.network_info) connector = self.conn.get_volume_connector(self.instance) self.assertEqual('test_url', connector['ip']) self.assertEqual('test_url', connector['host']) self.assertEqual('iscsi-name', connector['initiator']) self.assertNotIn('instance', connector) def 
test_refresh_instance_security_rules(self): self.assertRaises(NotImplementedError, self.conn.refresh_instance_security_rules, instance=None) @mock.patch.object(objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') def test_image_aging_image_used(self, mock_get_by_inst): self._create_vm() all_instances = [self.instance] self.conn.manage_image_cache(self.context, all_instances) self._cached_files_exist() def _get_timestamp_filename(self): return '%s%s' % (imagecache.TIMESTAMP_PREFIX, self.old_time.strftime(imagecache.TIMESTAMP_FORMAT)) def _override_time(self): self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00) def _fake_get_timestamp_filename(fake): return self._get_timestamp_filename() self.stub_out('nova.virt.vmwareapi.imagecache.' 'ImageCacheManager._get_timestamp_filename', _fake_get_timestamp_filename) def _timestamp_file_exists(self, exists=True): timestamp = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, self._get_timestamp_filename() + '/') if exists: vmwareapi_fake.assertPathExists(self, str(timestamp)) else: vmwareapi_fake.assertPathNotExists(self, str(timestamp)) def _image_aging_image_marked_for_deletion(self): self._create_vm(uuid=uuidutils.generate_uuid()) self._cached_files_exist() all_instances = [] self.conn.manage_image_cache(self.context, all_instances) self._cached_files_exist() self._timestamp_file_exists() def test_image_aging_image_marked_for_deletion(self): self._override_time() self._image_aging_image_marked_for_deletion() def _timestamp_file_removed(self): self._override_time() self._image_aging_image_marked_for_deletion() self._create_vm(num_instances=2, uuid=uuidutils.generate_uuid()) self._timestamp_file_exists(exists=False) def test_timestamp_file_removed_spawn(self): self._timestamp_file_removed() @mock.patch.object(objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') def test_timestamp_file_removed_aging(self, mock_get_by_inst): self._timestamp_file_removed() ts = self._get_timestamp_filename() ts_path = ds_obj.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, ts + '/') vmwareapi_fake._add_file(str(ts_path)) self._timestamp_file_exists() all_instances = [self.instance] self.conn.manage_image_cache(self.context, all_instances) self._timestamp_file_exists(exists=False) @mock.patch.object(objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') def test_image_aging_disabled(self, mock_get_by_inst): self._override_time() self.flags(remove_unused_base_images=False) self._create_vm() self._cached_files_exist() all_instances = [] self.conn.manage_image_cache(self.context, all_instances) self._cached_files_exist(exists=True) self._timestamp_file_exists(exists=False) def _image_aging_aged(self, aging_time=100): self._override_time() cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.flags(remove_unused_original_minimum_age_seconds=aging_time) self._image_aging_image_marked_for_deletion() all_instances = [] self.useFixture(utils_fixture.TimeFixture(cur_time)) self.conn.manage_image_cache(self.context, all_instances) def test_image_aging_aged(self): self._image_aging_aged(aging_time=8) self._cached_files_exist(exists=False) def test_image_aging_not_aged(self): self._image_aging_aged() self._cached_files_exist() def test_public_api_signatures(self): self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn) def test_register_extension(self): with mock.patch.object(self.conn._session, '_call_method', return_value=None) as mock_call_method: 
self.conn._register_openstack_extension() mock_call_method.assert_has_calls( [mock.call(oslo_vim_util, 'find_extension', constants.EXTENSION_KEY), mock.call(oslo_vim_util, 'register_extension', constants.EXTENSION_KEY, constants.EXTENSION_TYPE_INSTANCE)]) def test_register_extension_already_exists(self): with mock.patch.object(self.conn._session, '_call_method', return_value='fake-extension') as mock_find_ext: self.conn._register_openstack_extension() mock_find_ext.assert_called_once_with(oslo_vim_util, 'find_extension', constants.EXTENSION_KEY) def test_list_instances(self): instances = self.conn.list_instances() self.assertEqual(0, len(instances)) def _setup_mocks_for_session(self, mock_init): mock_init.return_value = None vcdriver = driver.VMwareVCDriver(None, False) vcdriver._session = mock.Mock() vcdriver._session.vim = None def side_effect(): vcdriver._session.vim = mock.Mock() vcdriver._session._create_session.side_effect = side_effect return vcdriver def test_host_power_action(self): self.assertRaises(NotImplementedError, self.conn.host_power_action, 'action') def test_host_maintenance_mode(self): self.assertRaises(NotImplementedError, self.conn.host_maintenance_mode, 'host', 'mode') def test_set_host_enabled(self): self.assertRaises(NotImplementedError, self.conn.set_host_enabled, 'state') def test_datastore_regex_configured(self): self.assertEqual(self.conn._datastore_regex, self.conn._vmops._datastore_regex) self.assertEqual(self.conn._datastore_regex, self.conn._vc_state._datastore_regex) @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore') def test_datastore_regex_configured_vcstate(self, mock_get_ds_ref): vcstate = self.conn._vc_state self.conn.get_available_resource(self.node_name) mock_get_ds_ref.assert_called_with( vcstate._session, vcstate._cluster, vcstate._datastore_regex) def test_get_available_resource(self): stats = self.conn.get_available_resource(self.node_name) self.assertEqual(32, stats['vcpus']) self.assertEqual(1024, stats['local_gb']) self.assertEqual(1024 - 500, stats['local_gb_used']) self.assertEqual(1000, stats['memory_mb']) self.assertEqual(500, stats['memory_mb_used']) self.assertEqual('VMware vCenter Server', stats['hypervisor_type']) self.assertEqual(5001000, stats['hypervisor_version']) self.assertEqual(self.node_name, stats['hypervisor_hostname']) self.assertIsNone(stats['cpu_info']) self.assertEqual( [("i686", "vmware", "hvm"), ("x86_64", "vmware", "hvm")], stats['supported_instances']) def test_invalid_datastore_regex(self): # Tests if we raise an exception for Invalid Regular Expression in # vmware_datastore_regex self.flags(cluster_name='test_cluster', datastore_regex='fake-ds(01', group='vmware') self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None) def test_get_available_nodes(self): nodelist = self.conn.get_available_nodes() self.assertEqual(1, len(nodelist)) self.assertIn(self.node_name, nodelist) @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage, 'from_image') def test_spawn_with_sparse_image(self, mock_from_image): img_info = images.VMwareImage( image_id=self.fake_image_uuid, file_size=1024, disk_type=constants.DISK_TYPE_SPARSE, linked_clone=False) mock_from_image.return_value = img_info self._create_vm() info = self._get_info() self._check_vm_info(info, power_state.RUNNING) def test_plug_vifs(self): # Check to make sure the method raises NotImplementedError. 
self._create_instance() self.assertRaises(NotImplementedError, self.conn.plug_vifs, instance=self.instance, network_info=None) def test_unplug_vifs(self): # Check to make sure the method raises NotImplementedError. self._create_instance() self.assertRaises(NotImplementedError, self.conn.unplug_vifs, instance=self.instance, network_info=None) def _create_vif(self): gw_4 = network_model.IP(address='101.168.1.1', type='gateway') dns_4 = network_model.IP(address='8.8.8.8', type=None) subnet_4 = network_model.Subnet(cidr='101.168.1.0/24', dns=[dns_4], gateway=gw_4, routes=None, dhcp_server='191.168.1.1') gw_6 = network_model.IP(address='101:1db9::1', type='gateway') subnet_6 = network_model.Subnet(cidr='101:1db9::/64', dns=None, gateway=gw_6, ips=None, routes=None) network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz', bridge=None, label=None, subnets=[subnet_4, subnet_6], bridge_interface='eth0', vlan=99) vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz', address='ca:fe:de:ad:be:ef', network=network_neutron, type=None, devname='tap-xxx-yyy-zzz', ovs_interfaceid='aaa-bbb-ccc') return vif_bridge_neutron def _validate_interfaces(self, id, index, num_iface_ids): vm = self._get_vm_record() found_iface_id = False extras = vm.get("config.extraConfig") key = "nvp.iface-id.%s" % index num_found = 0 for c in extras.OptionValue: if c.key.startswith("nvp.iface-id."): num_found += 1 if c.key == key and c.value == id: found_iface_id = True self.assertTrue(found_iface_id) self.assertEqual(num_iface_ids, num_found) def _attach_interface(self, vif): self.conn.attach_interface(self.instance, self.image, vif) self._validate_interfaces(vif['id'], 1, 2) def test_attach_interface(self): self._create_vm() vif = self._create_vif() self._attach_interface(vif) def test_attach_interface_with_exception(self): self._create_vm() vif = self._create_vif() with mock.patch.object(self.conn._session, '_wait_for_task', side_effect=Exception): self.assertRaises(exception.InterfaceAttachFailed, self.conn.attach_interface, self.instance, self.image, vif) @mock.patch.object(vif, 'get_network_device', return_value='fake_device') def _detach_interface(self, vif, mock_get_device): self._create_vm() self._attach_interface(vif) self.conn.detach_interface(self.instance, vif) self._validate_interfaces('free', 1, 2) def test_detach_interface(self): vif = self._create_vif() self._detach_interface(vif) def test_detach_interface_and_attach(self): vif = self._create_vif() self._detach_interface(vif) self.conn.attach_interface(self.instance, self.image, vif) self._validate_interfaces(vif['id'], 1, 2) def test_detach_interface_no_device(self): self._create_vm() vif = self._create_vif() self._attach_interface(vif) self.assertRaises(exception.NotFound, self.conn.detach_interface, self.instance, vif) def test_detach_interface_no_vif_match(self): self._create_vm() vif = self._create_vif() self._attach_interface(vif) vif['id'] = 'bad-id' self.assertRaises(exception.NotFound, self.conn.detach_interface, self.instance, vif) @mock.patch.object(vif, 'get_network_device', return_value='fake_device') def test_detach_interface_with_exception(self, mock_get_device): self._create_vm() vif = self._create_vif() self._attach_interface(vif) with mock.patch.object(self.conn._session, '_wait_for_task', side_effect=Exception): self.assertRaises(exception.InterfaceDetachFailed, self.conn.detach_interface, self.instance, vif) def test_resize_to_smaller_disk(self): self._create_vm(instance_type='m1.large') flavor = 
self._get_instance_type_by_name('m1.small') self.assertRaises(exception.InstanceFaultRollback, self.conn.migrate_disk_and_power_off, self.context, self.instance, 'fake_dest', flavor, None) def test_spawn_attach_volume_vmdk(self): self._spawn_attach_volume_vmdk() def test_spawn_attach_volume_vmdk_no_image_ref(self): self._spawn_attach_volume_vmdk(set_image_ref=False) def test_pause(self): # Tests that the VMwareVCDriver does not implement the pause method. self._create_instance() self.assertRaises(NotImplementedError, self.conn.pause, self.instance) def test_unpause(self): # Tests that the VMwareVCDriver does not implement the unpause method. self._create_instance() self.assertRaises(NotImplementedError, self.conn.unpause, self.instance) def test_datastore_dc_map(self): self.assertEqual({}, ds_util._DS_DC_MAPPING) self._create_vm() # currently there are 2 data stores self.assertEqual(2, len(ds_util._DS_DC_MAPPING)) def test_pre_live_migration(self): self.assertRaises(NotImplementedError, self.conn.pre_live_migration, self.context, 'fake_instance', 'fake_block_device_info', 'fake_network_info', 'fake_disk_info') def test_live_migration(self): self.assertRaises(NotImplementedError, self.conn.live_migration, self.context, 'fake_instance', 'fake_dest', 'fake_post_method', 'fake_recover_method') def test_rollback_live_migration_at_destination(self): self.assertRaises(NotImplementedError, self.conn.rollback_live_migration_at_destination, self.context, 'fake_instance', 'fake_network_info', 'fake_block_device_info') def test_post_live_migration(self): self.assertIsNone(self.conn.post_live_migration(self.context, 'fake_instance', 'fake_block_device_info')) def test_get_instance_disk_info_is_implemented(self): # Ensure that the method has been implemented in the driver instance = objects.Instance() try: disk_info = self.conn.get_instance_disk_info(instance) self.assertIsNone(disk_info) except NotImplementedError: self.fail("test_get_instance_disk_info() should not raise " "NotImplementedError") def test_get_host_uptime(self): self.assertRaises(NotImplementedError, self.conn.get_host_uptime) def test_pbm_wsdl_location(self): self.flags(pbm_enabled=True, pbm_wsdl_location='fira', group='vmware') self.conn._update_pbm_location() self.assertEqual('fira', self.conn._session._pbm_wsdl_loc) self.assertIsNone(self.conn._session._pbm) def test_nodename(self): test_mor = "domain-26" self.assertEqual("%s.%s" % (test_mor, vmwareapi_fake._FAKE_VCENTER_UUID), self.conn._create_nodename(test_mor), "VC driver failed to create the proper node name") @mock.patch.object(driver.LOG, 'warning') def test_min_version(self, mock_warning): self.conn._check_min_version() self.assertFalse(mock_warning.called) @mock.patch.object(driver.LOG, 'warning') @mock.patch.object(oslo_vim_util, 'get_vc_version', return_value='5.0.0') def test_invalid_min_version(self, mock_version, mock_warning): self.conn._check_min_version() # assert that the min version is in a warning message expected_arg = {'version': constants.MIN_VC_VERSION} version_arg_found = False for call in mock_warning.call_args_list: if call[0][1] == expected_arg: version_arg_found = True break self.assertTrue(version_arg_found) nova-13.1.4/nova/tests/unit/virt/test_images.py0000664000567000056710000000534113064447152022643 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from oslo_concurrency import processutils from nova import exception from nova import test from nova import utils from nova.virt import images class QemuTestCase(test.NoDBTestCase): def test_qemu_info_with_bad_path(self): self.assertRaises(exception.DiskNotFound, images.qemu_img_info, '/path/that/does/not/exist') @mock.patch.object(os.path, 'exists', return_value=True) def test_qemu_info_with_errors(self, path_exists): self.assertRaises(exception.InvalidDiskInfo, images.qemu_img_info, '/fake/path') @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(utils, 'execute', return_value=('stdout', None)) def test_qemu_info_with_no_errors(self, path_exists, utils_execute): image_info = images.qemu_img_info('/fake/path') self.assertTrue(image_info) self.assertTrue(str(image_info)) @mock.patch.object(utils, 'execute', side_effect=processutils.ProcessExecutionError) def test_convert_image_with_errors(self, mocked_execute): self.assertRaises(exception.ImageUnacceptable, images.convert_image, '/path/that/does/not/exist', '/other/path/that/does/not/exist', 'qcow2', 'raw') @mock.patch.object(images, 'convert_image', side_effect=exception.ImageUnacceptable) @mock.patch.object(images, 'qemu_img_info') @mock.patch.object(images, 'fetch') def test_fetch_to_raw_errors(self, convert_image, qemu_img_info, fetch): qemu_img_info.backing_file = None qemu_img_info.file_format = 'qcow2' qemu_img_info.virtual_size = 20 self.assertRaisesRegex(exception.ImageUnacceptable, 'Image href123 is unacceptable.*', images.fetch_to_raw, None, 'href123', '/no/path', None, None) nova-13.1.4/nova/tests/unit/virt/fakelibosinfo.py0000664000567000056710000000565513064447140023157 0ustar jenkinsjenkins00000000000000# Copyright 2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
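# The helpers below stand in for the libosinfo API in unit tests.
# match_item() emulates libosinfo's filter matching for these fakes: it
# checks the single key/value constraint stored on a fake Filter against
# the given object, mapping the libosinfo attribute names 'class' and
# 'short-id' onto the fakes' '_class' and 'short_id' attributes.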
def match_item(obj, fltr): key, val = list(fltr._filter.items())[0] if key == 'class': key = '_class' elif key == 'short-id': key = 'short_id' return getattr(obj, key, None) == val class Loader(object): def process_default_path(self): pass def get_db(self): return Db() class Db(object): def __init__(self): # Generate test devices self.devs = [] self.oslist = None net = Device() net._class = 'net' net.name = 'virtio-net' self.devs.append(net) net = Device() net._class = 'block' net.name = 'virtio-block' self.devs.append(net) devlist = DeviceList() devlist.devices = self.devs fedora = Os() fedora.name = 'Fedora 22' fedora.id = 'http://fedoraproject.org/fedora/22' fedora.short_id = 'fedora22' fedora.dev_list = devlist self.oslist = OsList() self.oslist.os_list = [fedora] def get_os_list(self): return self.oslist class Filter(object): def __init__(self): self._filter = {} @classmethod def new(cls): return cls() def add_constraint(self, flt_key, val): self._filter[flt_key] = val class OsList(object): def __init__(self): self.os_list = [] def new_filtered(self, fltr): new_list = OsList() new_list.os_list = [os for os in self.os_list if match_item(os, fltr)] return new_list def get_length(self): return len(self.os_list) def get_nth(self, index): return self.os_list[index] class Os(object): def __init__(self): self.name = None self.short_id = None self.id = None self.dev_list = None def get_all_devices(self, fltr): new_list = DeviceList() new_list.devices = [dev for dev in self.dev_list.devices if match_item(dev, fltr)] return new_list def get_name(self): return self.name class DeviceList(object): def __init__(self): self.devices = [] def get_length(self): return len(self.devices) def get_nth(self, index): return self.devices[index] class Device(object): def __init__(self): self.name = None self._class = None def get_name(self): return self.name nova-13.1.4/nova/tests/unit/virt/test_virt_drivers.py0000664000567000056710000011641613064447152024126 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
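# These tests exercise the common virt driver interface (spawn, reboot,
# suspend/resume, volume attach/detach, consoles, events and host
# operations) against drivers that can run without a real hypervisor: the
# fake driver and the libvirt driver backed by fakelibvirt.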
import base64 import sys import traceback import fixtures import mock import netaddr from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils import six from nova.compute import manager from nova.console import type as ctype from nova import context from nova import exception from nova import objects from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import fake_block_device from nova.tests.unit.image import fake as fake_image from nova.tests.unit import utils as test_utils from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.virt import block_device as driver_block_device from nova.virt import event as virtevent from nova.virt import fake from nova.virt import hardware from nova.virt import libvirt from nova.virt.libvirt import imagebackend LOG = logging.getLogger(__name__) def catch_notimplementederror(f): """Decorator to simplify catching drivers raising NotImplementedError If a particular call makes a driver raise NotImplementedError, we log it so that we can extract this information afterwards as needed. """ def wrapped_func(self, *args, **kwargs): try: return f(self, *args, **kwargs) except NotImplementedError: frame = traceback.extract_tb(sys.exc_info()[2])[-1] LOG.error("%(driver)s does not implement %(method)s " "required for test %(test)s" % {'driver': type(self.connection), 'method': frame[2], 'test': f.__name__}) wrapped_func.__name__ = f.__name__ wrapped_func.__doc__ = f.__doc__ return wrapped_func class _FakeDriverBackendTestCase(object): def _setup_fakelibvirt(self): # So that the _supports_direct_io does the test based # on the current working directory, instead of the # default instances_path which doesn't exist self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) # Put fakelibvirt in place if 'libvirt' in sys.modules: self.saved_libvirt = sys.modules['libvirt'] else: self.saved_libvirt = None import nova.tests.unit.virt.libvirt.fake_imagebackend as \ fake_imagebackend import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \ fake_libvirt_utils import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt import nova.tests.unit.virt.libvirt.fake_os_brick_connector as \ fake_os_brick_connector sys.modules['libvirt'] = fakelibvirt import nova.virt.libvirt.driver import nova.virt.libvirt.firewall import nova.virt.libvirt.host self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.imagebackend', fake_imagebackend)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.firewall.libvirt', fakelibvirt)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.connector', fake_os_brick_connector)) fakelibvirt.disable_event_thread(self) self.flags(rescue_image_id="2", rescue_kernel_id="3", rescue_ramdisk_id=None, snapshots_directory='./', sysinfo_serial='none', group='libvirt') def fake_extend(image, size): pass def fake_migrateToURI(*a): pass def fake_make_drive(_self, _path): pass def fake_get_instance_disk_info(_self, instance, xml=None, block_device_info=None): return '[]' def fake_delete_instance_files(_self, _instance): 
pass def fake_wait(): pass def fake_detach_device_with_retry(_self, get_device_conf_func, device, persistent, live, max_retry_count=7, inc_sleep_time=2, max_sleep_time=30): # Still calling detach, but instead of returning function # that actually checks if device is gone from XML, just continue # because XML never gets updated in these tests _self.detach_device(get_device_conf_func(device), persistent=persistent, live=live) return fake_wait self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver, '_get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(nova.virt.libvirt.driver.disk, 'extend', fake_extend) self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver, 'delete_instance_files', fake_delete_instance_files) self.stubs.Set(nova.virt.libvirt.guest.Guest, 'detach_device_with_retry', fake_detach_device_with_retry) # Like the existing fakelibvirt.migrateToURI, do nothing, # but don't fail for these tests. self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain, 'migrateToURI', fake_migrateToURI) # We can't actually make a config drive v2 because ensure_tree has # been faked out self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder, 'make_drive', fake_make_drive) def _teardown_fakelibvirt(self): # Restore libvirt if self.saved_libvirt: sys.modules['libvirt'] = self.saved_libvirt def setUp(self): super(_FakeDriverBackendTestCase, self).setUp() # TODO(sdague): it would be nice to do this in a way that only # the relevant backends were replaced for tests, though this # should not harm anything by doing it for all backends fake_image.stub_out_image_service(self) self._setup_fakelibvirt() def tearDown(self): fake_image.FakeImageService_reset() self._teardown_fakelibvirt() super(_FakeDriverBackendTestCase, self).tearDown() class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase): """Test that ComputeManager can successfully load both old style and new style drivers and end up with the correct final class. """ # if your driver supports being tested in a fake way, it can go here # # both long form and short form drivers are supported new_drivers = { 'nova.virt.fake.FakeDriver': 'FakeDriver', 'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver', 'fake.FakeDriver': 'FakeDriver', 'libvirt.LibvirtDriver': 'LibvirtDriver' } def test_load_new_drivers(self): for cls, driver in six.iteritems(self.new_drivers): self.flags(compute_driver=cls) # NOTE(sdague) the try block is to make it easier to debug a # failure by knowing which driver broke try: cm = manager.ComputeManager() except Exception as e: self.fail("Couldn't load driver %s - %s" % (cls, e)) self.assertEqual(cm.driver.__class__.__name__, driver, "Couldn't load driver %s" % cls) def test_fail_to_load_new_drivers(self): self.flags(compute_driver='nova.virt.amiga') def _fake_exit(error): raise test.TestingException() self.stubs.Set(sys, 'exit', _fake_exit) self.assertRaises(test.TestingException, manager.ComputeManager) class _VirtDriverTestCase(_FakeDriverBackendTestCase): def setUp(self): super(_VirtDriverTestCase, self).setUp() self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) self.connection = importutils.import_object(self.driver_module, fake.FakeVirtAPI()) self.ctxt = test_utils.get_test_admin_context() self.image_service = fake_image.FakeImageService() # NOTE(dripton): resolve_driver_format does some file reading and # writing and chowning that complicate testing too much by requiring # using real directories with proper permissions.
Just stub it out # here; we test it in test_imagebackend.py self.stubs.Set(imagebackend.Image, 'resolve_driver_format', imagebackend.Image._get_driver_format) def _get_running_instance(self, obj=True): instance_ref = test_utils.get_test_instance(obj=obj) network_info = test_utils.get_test_network_info() network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \ '1.1.1.1' image_meta = test_utils.get_test_image_object(None, instance_ref) self.connection.spawn(self.ctxt, instance_ref, image_meta, [], 'herp', network_info=network_info) return instance_ref, network_info @catch_notimplementederror def test_init_host(self): self.connection.init_host('myhostname') @catch_notimplementederror def test_list_instances(self): self.connection.list_instances() @catch_notimplementederror def test_list_instance_uuids(self): self.connection.list_instance_uuids() @catch_notimplementederror def test_spawn(self): instance_ref, network_info = self._get_running_instance() domains = self.connection.list_instances() self.assertIn(instance_ref['name'], domains) num_instances = self.connection.get_num_instances() self.assertEqual(1, num_instances) @catch_notimplementederror def test_snapshot_not_running(self): instance_ref = test_utils.get_test_instance() img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) self.assertRaises(exception.InstanceNotRunning, self.connection.snapshot, self.ctxt, instance_ref, img_ref['id'], lambda *args, **kwargs: None) @catch_notimplementederror def test_snapshot_running(self): img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) instance_ref, network_info = self._get_running_instance() self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'], lambda *args, **kwargs: None) @catch_notimplementederror def test_post_interrupted_snapshot_cleanup(self): instance_ref, network_info = self._get_running_instance() self.connection.post_interrupted_snapshot_cleanup(self.ctxt, instance_ref) @catch_notimplementederror def test_reboot(self): reboot_type = "SOFT" instance_ref, network_info = self._get_running_instance() self.connection.reboot(self.ctxt, instance_ref, network_info, reboot_type) @catch_notimplementederror def test_get_host_ip_addr(self): host_ip = self.connection.get_host_ip_addr() # Will raise an exception if it's not a valid IP at all ip = netaddr.IPAddress(host_ip) # For now, assume IPv4. 
self.assertEqual(ip.version, 4) @catch_notimplementederror def test_set_admin_password(self): instance, network_info = self._get_running_instance(obj=True) self.connection.set_admin_password(instance, 'p4ssw0rd') @catch_notimplementederror def test_inject_file(self): instance_ref, network_info = self._get_running_instance() self.connection.inject_file(instance_ref, base64.b64encode('/testfile'), base64.b64encode('testcontents')) @catch_notimplementederror def test_resume_state_on_host_boot(self): instance_ref, network_info = self._get_running_instance() self.connection.resume_state_on_host_boot(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_rescue(self): image_meta = objects.ImageMeta.from_dict({}) instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, image_meta, '') @catch_notimplementederror def test_unrescue_unrescued_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.unrescue(instance_ref, network_info) @catch_notimplementederror def test_unrescue_rescued_instance(self): image_meta = objects.ImageMeta.from_dict({}) instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, image_meta, '') self.connection.unrescue(instance_ref, network_info) @catch_notimplementederror def test_poll_rebooting_instances(self): instances = [self._get_running_instance()] self.connection.poll_rebooting_instances(10, instances) @catch_notimplementederror def test_migrate_disk_and_power_off(self): instance_ref, network_info = self._get_running_instance() flavor_ref = test_utils.get_test_flavor() self.connection.migrate_disk_and_power_off( self.ctxt, instance_ref, 'dest_host', flavor_ref, network_info) @catch_notimplementederror def test_power_off(self): instance_ref, network_info = self._get_running_instance() self.connection.power_off(instance_ref) @catch_notimplementederror def test_power_on_running(self): instance_ref, network_info = self._get_running_instance() self.connection.power_on(self.ctxt, instance_ref, network_info, None) @catch_notimplementederror def test_power_on_powered_off(self): instance_ref, network_info = self._get_running_instance() self.connection.power_off(instance_ref) self.connection.power_on(self.ctxt, instance_ref, network_info, None) @catch_notimplementederror def test_trigger_crash_dump(self): instance_ref, network_info = self._get_running_instance() self.connection.trigger_crash_dump(instance_ref) @catch_notimplementederror def test_soft_delete(self): instance_ref, network_info = self._get_running_instance(obj=True) self.connection.soft_delete(instance_ref) @catch_notimplementederror def test_restore_running(self): instance_ref, network_info = self._get_running_instance() self.connection.restore(instance_ref) @catch_notimplementederror def test_restore_soft_deleted(self): instance_ref, network_info = self._get_running_instance() self.connection.soft_delete(instance_ref) self.connection.restore(instance_ref) @catch_notimplementederror def test_pause(self): instance_ref, network_info = self._get_running_instance() self.connection.pause(instance_ref) @catch_notimplementederror def test_unpause_unpaused_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.unpause(instance_ref) @catch_notimplementederror def test_unpause_paused_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.pause(instance_ref) 
self.connection.unpause(instance_ref) @catch_notimplementederror def test_suspend(self): instance_ref, network_info = self._get_running_instance() self.connection.suspend(self.ctxt, instance_ref) @catch_notimplementederror def test_resume_unsuspended_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.resume(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_resume_suspended_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.suspend(self.ctxt, instance_ref) self.connection.resume(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_destroy_instance_nonexistent(self): fake_instance = test_utils.get_test_instance(obj=True) network_info = test_utils.get_test_network_info() self.connection.destroy(self.ctxt, fake_instance, network_info) @catch_notimplementederror def test_destroy_instance(self): instance_ref, network_info = self._get_running_instance() self.assertIn(instance_ref['name'], self.connection.list_instances()) self.connection.destroy(self.ctxt, instance_ref, network_info) self.assertNotIn(instance_ref['name'], self.connection.list_instances()) @catch_notimplementederror def test_get_volume_connector(self): result = self.connection.get_volume_connector({'id': 'fake'}) self.assertIn('ip', result) self.assertIn('initiator', result) self.assertIn('host', result) @catch_notimplementederror def test_get_volume_connector_storage_ip(self): ip = 'my_ip' storage_ip = 'storage_ip' self.flags(my_block_storage_ip=storage_ip, my_ip=ip) result = self.connection.get_volume_connector({'id': 'fake'}) self.assertIn('ip', result) self.assertIn('initiator', result) self.assertIn('host', result) self.assertEqual(storage_ip, result['ip']) @catch_notimplementederror def test_attach_detach_volume(self): instance_ref, network_info = self._get_running_instance() connection_info = { "driver_volume_type": "fake", "serial": "fake_serial", "data": {} } self.assertIsNone( self.connection.attach_volume(None, connection_info, instance_ref, '/dev/sda')) self.assertIsNone( self.connection.detach_volume(connection_info, instance_ref, '/dev/sda')) @catch_notimplementederror def test_swap_volume(self): instance_ref, network_info = self._get_running_instance() self.assertIsNone( self.connection.attach_volume(None, {'driver_volume_type': 'fake', 'data': {}}, instance_ref, '/dev/sda')) self.assertIsNone( self.connection.swap_volume({'driver_volume_type': 'fake', 'data': {}}, {'driver_volume_type': 'fake', 'data': {}}, instance_ref, '/dev/sda', 2)) @catch_notimplementederror def test_attach_detach_different_power_states(self): instance_ref, network_info = self._get_running_instance() connection_info = { "driver_volume_type": "fake", "serial": "fake_serial", "data": {} } self.connection.power_off(instance_ref) self.connection.attach_volume(None, connection_info, instance_ref, '/dev/sda') bdm = { 'root_device_name': None, 'swap': None, 'ephemerals': [], 'block_device_mapping': driver_block_device.convert_volumes([ objects.BlockDeviceMapping( self.ctxt, **fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': instance_ref['uuid'], 'device_name': '/dev/sda', 'source_type': 'volume', 'destination_type': 'volume', 'delete_on_termination': False, 'snapshot_id': None, 'volume_id': 'abcdedf', 'volume_size': None, 'no_device': None })), ]) } bdm['block_device_mapping'][0]['connection_info'] = ( {'driver_volume_type': 'fake', 'data': {}}) with mock.patch.object( 
driver_block_device.DriverVolumeBlockDevice, 'save'): self.connection.power_on( self.ctxt, instance_ref, network_info, bdm) self.connection.detach_volume(connection_info, instance_ref, '/dev/sda') @catch_notimplementederror def test_get_info(self): instance_ref, network_info = self._get_running_instance() info = self.connection.get_info(instance_ref) self.assertIsInstance(info, hardware.InstanceInfo) @catch_notimplementederror def test_get_info_for_unknown_instance(self): fake_instance = test_utils.get_test_instance(obj=True) self.assertRaises(exception.NotFound, self.connection.get_info, fake_instance) @catch_notimplementederror def test_get_diagnostics(self): instance_ref, network_info = self._get_running_instance(obj=True) self.connection.get_diagnostics(instance_ref) @catch_notimplementederror def test_get_instance_diagnostics(self): instance_ref, network_info = self._get_running_instance(obj=True) instance_ref['launched_at'] = timeutils.utcnow() self.connection.get_instance_diagnostics(instance_ref) @catch_notimplementederror def test_block_stats(self): instance_ref, network_info = self._get_running_instance() stats = self.connection.block_stats(instance_ref, 'someid') self.assertEqual(len(stats), 5) @catch_notimplementederror def test_get_console_output(self): fake_libvirt_utils.files['dummy.log'] = '' instance_ref, network_info = self._get_running_instance() console_output = self.connection.get_console_output(self.ctxt, instance_ref) self.assertIsInstance(console_output, six.string_types) @catch_notimplementederror def test_get_vnc_console(self): instance, network_info = self._get_running_instance(obj=True) vnc_console = self.connection.get_vnc_console(self.ctxt, instance) self.assertIsInstance(vnc_console, ctype.ConsoleVNC) @catch_notimplementederror def test_get_spice_console(self): instance_ref, network_info = self._get_running_instance() spice_console = self.connection.get_spice_console(self.ctxt, instance_ref) self.assertIsInstance(spice_console, ctype.ConsoleSpice) @catch_notimplementederror def test_get_rdp_console(self): instance_ref, network_info = self._get_running_instance() rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref) self.assertIsInstance(rdp_console, ctype.ConsoleRDP) @catch_notimplementederror def test_get_serial_console(self): instance_ref, network_info = self._get_running_instance() serial_console = self.connection.get_serial_console(self.ctxt, instance_ref) self.assertIsInstance(serial_console, ctype.ConsoleSerial) @catch_notimplementederror def test_get_mks_console(self): instance_ref, network_info = self._get_running_instance() mks_console = self.connection.get_mks_console(self.ctxt, instance_ref) self.assertIsInstance(mks_console, ctype.ConsoleMKS) @catch_notimplementederror def test_get_console_pool_info(self): instance_ref, network_info = self._get_running_instance() console_pool = self.connection.get_console_pool_info(instance_ref) self.assertIn('address', console_pool) self.assertIn('username', console_pool) self.assertIn('password', console_pool) @catch_notimplementederror def test_refresh_security_group_rules(self): # FIXME: Create security group and add the instance to it instance_ref, network_info = self._get_running_instance() self.connection.refresh_security_group_rules(1) @catch_notimplementederror def test_refresh_instance_security_rules(self): # FIXME: Create security group and add the instance to it instance_ref, network_info = self._get_running_instance() self.connection.refresh_instance_security_rules(instance_ref) 
@catch_notimplementederror def test_ensure_filtering_for_instance(self): instance = test_utils.get_test_instance(obj=True) network_info = test_utils.get_test_network_info() self.connection.ensure_filtering_rules_for_instance(instance, network_info) @catch_notimplementederror def test_unfilter_instance(self): instance_ref = test_utils.get_test_instance() network_info = test_utils.get_test_network_info() self.connection.unfilter_instance(instance_ref, network_info) def test_live_migration(self): instance_ref, network_info = self._get_running_instance() fake_context = context.RequestContext('fake', 'fake') migration = objects.Migration(context=fake_context, id=1) migrate_data = objects.LibvirtLiveMigrateData( migration=migration, bdms=[], block_migration=False) self.connection.live_migration(self.ctxt, instance_ref, 'otherhost', lambda *a: None, lambda *a: None, migrate_data=migrate_data) @catch_notimplementederror def test_live_migration_force_complete(self): instance_ref, network_info = self._get_running_instance() self.connection.live_migration_force_complete(instance_ref) @catch_notimplementederror def test_live_migration_abort(self): instance_ref, network_info = self._get_running_instance() self.connection.live_migration_abort(instance_ref) @catch_notimplementederror def _check_available_resource_fields(self, host_status): keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'hypervisor_type', 'hypervisor_version', 'hypervisor_hostname', 'cpu_info', 'disk_available_least', 'supported_instances'] for key in keys: self.assertIn(key, host_status) self.assertIsInstance(host_status['hypervisor_version'], int) @catch_notimplementederror def test_get_available_resource(self): available_resource = self.connection.get_available_resource( 'myhostname') self._check_available_resource_fields(available_resource) @catch_notimplementederror def test_get_available_nodes(self): self.connection.get_available_nodes(False) @catch_notimplementederror def _check_host_cpu_status_fields(self, host_cpu_status): self.assertIn('kernel', host_cpu_status) self.assertIn('idle', host_cpu_status) self.assertIn('user', host_cpu_status) self.assertIn('iowait', host_cpu_status) self.assertIn('frequency', host_cpu_status) @catch_notimplementederror def test_get_host_cpu_stats(self): host_cpu_status = self.connection.get_host_cpu_stats() self._check_host_cpu_status_fields(host_cpu_status) @catch_notimplementederror def test_set_host_enabled(self): self.connection.set_host_enabled(True) @catch_notimplementederror def test_get_host_uptime(self): self.connection.get_host_uptime() @catch_notimplementederror def test_host_power_action_reboot(self): self.connection.host_power_action('reboot') @catch_notimplementederror def test_host_power_action_shutdown(self): self.connection.host_power_action('shutdown') @catch_notimplementederror def test_host_power_action_startup(self): self.connection.host_power_action('startup') @catch_notimplementederror def test_add_to_aggregate(self): self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host') @catch_notimplementederror def test_remove_from_aggregate(self): self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host') def test_events(self): got_events = [] def handler(event): got_events.append(event) self.connection.register_event_listener(handler) event1 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STARTED) event2 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", 
virtevent.EVENT_LIFECYCLE_PAUSED) self.connection.emit_event(event1) self.connection.emit_event(event2) want_events = [event1, event2] self.assertEqual(want_events, got_events) event3 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_RESUMED) event4 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STOPPED) self.connection.emit_event(event3) self.connection.emit_event(event4) want_events = [event1, event2, event3, event4] self.assertEqual(want_events, got_events) def test_event_bad_object(self): # Passing in something which does not inherit # from virtevent.Event def handler(event): pass self.connection.register_event_listener(handler) badevent = { "foo": "bar" } self.assertRaises(ValueError, self.connection.emit_event, badevent) def test_event_bad_callback(self): # Check that if a callback raises an exception, # it does not propagate back out of the # 'emit_event' call def handler(event): raise Exception("Hit Me!") self.connection.register_event_listener(handler) event1 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STARTED) self.connection.emit_event(event1) def test_emit_unicode_event(self): """Tests that we do not fail on translated unicode events.""" started_event = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STARTED) callback = mock.Mock() self.connection.register_event_listener(callback) with mock.patch.object(started_event, 'get_name', return_value=u'\xF0\x9F\x92\xA9'): self.connection.emit_event(started_event) callback.assert_called_once_with(started_event) def test_set_bootable(self): self.assertRaises(NotImplementedError, self.connection.set_bootable, 'instance', True) @catch_notimplementederror def test_get_instance_disk_info(self): # This should be implemented by any driver that supports live migrate. instance_ref, network_info = self._get_running_instance() self.connection.get_instance_disk_info(instance_ref, block_device_info={}) @catch_notimplementederror def test_get_device_name_for_instance(self): instance, _ = self._get_running_instance() self.connection.get_device_name_for_instance( instance, [], mock.Mock(spec=objects.BlockDeviceMapping)) def test_network_binding_host_id(self): # NOTE(jroll) self._get_running_instance calls spawn(), so we can't # use it to test this method. Make a simple object instead; we just # need instance.host. 
        instance = objects.Instance(self.ctxt, host='somehost')
        self.assertEqual(instance.host,
            self.connection.network_binding_host_id(self.ctxt, instance))


class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
    def setUp(self):
        self.driver_module = "nova.virt.driver.ComputeDriver"
        super(AbstractDriverTestCase, self).setUp()

    def test_live_migration(self):
        self.skipTest('Live migration is not implemented in the base '
                      'virt driver.')


class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
    def setUp(self):
        self.driver_module = 'nova.virt.fake.FakeDriver'
        fake.set_nodes(['myhostname'])
        super(FakeConnectionTestCase, self).setUp()

    def _check_available_resource_fields(self, host_status):
        super(FakeConnectionTestCase, self)._check_available_resource_fields(
            host_status)

        hypervisor_type = host_status['hypervisor_type']
        supported_instances = host_status['supported_instances']
        try:
            # supported_instances could be JSON wrapped
            supported_instances = jsonutils.loads(supported_instances)
        except TypeError:
            pass
        self.assertTrue(any(hypervisor_type in x for x in
                            supported_instances))


class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):

    REQUIRES_LOCKING = True

    def setUp(self):
        # Point _VirtDriverTestCase at the right module
        self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
        super(LibvirtConnTestCase, self).setUp()
        self.stubs.Set(self.connection,
                       '_set_host_enabled', mock.MagicMock())
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.get_admin_context',
            self._fake_admin_context))
        # This is needed for the live migration tests which spawn off the
        # operation for monitoring.
        self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())

    def _fake_admin_context(self, *args, **kwargs):
        return self.ctxt

    def test_force_hard_reboot(self):
        self.flags(wait_soft_reboot_seconds=0, group='libvirt')
        self.test_reboot()

    def test_migrate_disk_and_power_off(self):
        # There is not enough fake infrastructure to exercise this method,
        # so skip it.
self.skipTest("Test nothing, but this method" " needed to override superclass.") def test_internal_set_host_enabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: False service_mock.configure_mock(disabled_reason='None', disabled=False) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(False, 'ERROR!') self.assertTrue(service_mock.disabled) self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!') def test_set_host_enabled_when_auto_disabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: True, 'AUTO: ERROR' service_mock.configure_mock(disabled_reason='AUTO: ERROR', disabled=True) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(True) self.assertFalse(service_mock.disabled) self.assertIsNone(service_mock.disabled_reason) def test_set_host_enabled_when_manually_disabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: True, 'Manually disabled' service_mock.configure_mock(disabled_reason='Manually disabled', disabled=True) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(True) self.assertTrue(service_mock.disabled) self.assertEqual(service_mock.disabled_reason, 'Manually disabled') def test_set_host_enabled_dont_override_manually_disabled(self): self.mox.UnsetStubs() service_mock = mock.MagicMock() # Previous status of the service: disabled: True, 'Manually disabled' service_mock.configure_mock(disabled_reason='Manually disabled', disabled=True) with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(False, 'ERROR!') self.assertTrue(service_mock.disabled) self.assertEqual(service_mock.disabled_reason, 'Manually disabled') @catch_notimplementederror @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs') def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock): instance_ref, network_info = self._get_running_instance() self.connection.cleanup(self.ctxt, instance_ref, network_info, destroy_vifs=False) self.assertEqual(unplug_vifs_mock.call_count, 0) @catch_notimplementederror @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs') def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock): instance_ref, network_info = self._get_running_instance() self.connection.cleanup(self.ctxt, instance_ref, network_info, destroy_vifs=True) self.assertEqual(unplug_vifs_mock.call_count, 1) unplug_vifs_mock.assert_called_once_with(instance_ref, network_info, True) def test_get_device_name_for_instance(self): self.skipTest("Tested by the nova.tests.unit.virt.libvirt suite") @catch_notimplementederror @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch("nova.virt.libvirt.host.Host.has_min_version") def test_set_admin_password(self, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} instance, network_info = self._get_running_instance(obj=True) self.connection.set_admin_password(instance, 'p4ssw0rd') nova-13.1.4/nova/tests/unit/virt/test_hardware.py0000664000567000056710000034567413064447152023213 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import uuid import mock from oslo_serialization import jsonutils import six from nova import context from nova import exception from nova import objects from nova.objects import base as base_obj from nova.objects import fields from nova.pci import stats from nova import test from nova.virt import hardware as hw class InstanceInfoTests(test.NoDBTestCase): def test_instance_info_default(self): ii = hw.InstanceInfo() self.assertIsNone(ii.state) self.assertIsNone(ii.id) self.assertEqual(0, ii.max_mem_kb) self.assertEqual(0, ii.mem_kb) self.assertEqual(0, ii.num_cpu) self.assertEqual(0, ii.cpu_time_ns) def test_instance_info(self): ii = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') self.assertEqual('fake-state', ii.state) self.assertEqual('fake-id', ii.id) self.assertEqual(1, ii.max_mem_kb) self.assertEqual(2, ii.mem_kb) self.assertEqual(3, ii.num_cpu) self.assertEqual(4, ii.cpu_time_ns) def test_instance_infoi_equals(self): ii1 = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') ii2 = hw.InstanceInfo(state='fake-state', max_mem_kb=1, mem_kb=2, num_cpu=3, cpu_time_ns=4, id='fake-id') ii3 = hw.InstanceInfo(state='fake-estat', max_mem_kb=11, mem_kb=22, num_cpu=33, cpu_time_ns=44, id='fake-di') self.assertEqual(ii1, ii2) self.assertNotEqual(ii1, ii3) class CpuSetTestCase(test.NoDBTestCase): def test_get_vcpu_pin_set(self): self.flags(vcpu_pin_set="1-3,5,^2") cpuset_ids = hw.get_vcpu_pin_set() self.assertEqual(set([1, 3, 5]), cpuset_ids) def test_parse_cpu_spec_none_returns_none(self): self.flags(vcpu_pin_set=None) cpuset_ids = hw.get_vcpu_pin_set() self.assertIsNone(cpuset_ids) def test_parse_cpu_spec_valid_syntax_works(self): cpuset_ids = hw.parse_cpu_spec("1") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1,2") self.assertEqual(set([1, 2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,") self.assertEqual(set([1, 2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-1") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3") self.assertEqual(set([1, 2, 3]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1,^2") self.assertEqual(set([1]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-2, ^1") self.assertEqual(set([2]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("1-3,5,^2") self.assertEqual(set([1, 3, 5]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5") self.assertEqual(set([1, 3, 5]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1") self.assertEqual(set([]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("^0-1") self.assertEqual(set([]), cpuset_ids) cpuset_ids = hw.parse_cpu_spec("0-3,^1-2") self.assertEqual(set([0, 3]), cpuset_ids) def test_parse_cpu_spec_invalid_syntax_raises(self): self.assertRaises(exception.Invalid, hw.parse_cpu_spec, " -1-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3-,5,^2") self.assertRaises(exception.Invalid, 
hw.parse_cpu_spec, "-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^2^") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^2-") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "--13,^^5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "a-3,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-a,5,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,b,^2") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "1-3,5,^c") self.assertRaises(exception.Invalid, hw.parse_cpu_spec, "3 - 1, 5 , ^ 2 ") def test_format_cpu_spec(self): cpus = set([]) spec = hw.format_cpu_spec(cpus) self.assertEqual("", spec) cpus = [] spec = hw.format_cpu_spec(cpus) self.assertEqual("", spec) cpus = set([1, 3]) spec = hw.format_cpu_spec(cpus) self.assertEqual("1,3", spec) cpus = [1, 3] spec = hw.format_cpu_spec(cpus) self.assertEqual("1,3", spec) cpus = set([1, 2, 4, 6]) spec = hw.format_cpu_spec(cpus) self.assertEqual("1-2,4,6", spec) cpus = [1, 2, 4, 6] spec = hw.format_cpu_spec(cpus) self.assertEqual("1-2,4,6", spec) cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]) spec = hw.format_cpu_spec(cpus) self.assertEqual("10-11,13-16,19-20,40,42,48", spec) cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48] spec = hw.format_cpu_spec(cpus) self.assertEqual("10-11,13-16,19-20,40,42,48", spec) cpus = set([1, 2, 4, 6]) spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("1,2,4,6", spec) cpus = [1, 2, 4, 6] spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("1,2,4,6", spec) cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]) spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec) cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48] spec = hw.format_cpu_spec(cpus, allow_ranges=False) self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec) class VCPUTopologyTest(test.NoDBTestCase): def test_validate_config(self): testdata = [ { # Flavor sets preferred topology only "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", }), "image": { "properties": {} }, "expect": ( 8, 2, 1, 65536, 65536, 65536 ) }, { # Image topology overrides flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_max_threads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": ( 4, 2, 2, 65536, 65536, 2, ) }, { # Partial image topology overrides flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_sockets": "2", } }, "expect": ( 2, -1, -1, 65536, 65536, 65536, ) }, { # Restrict use of threads "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_threads": "2", }), "image": { "properties": { "hw_cpu_max_threads": "1", } }, "expect": ( -1, -1, -1, 65536, 65536, 1, ) }, { # Force use of at least two sockets "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": {} }, "expect": ( -1, -1, -1, 65536, 8, 1 ) }, { # Image limits reduce flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", 
"hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "4", } }, "expect": ( -1, -1, -1, 65536, 4, 1 ) }, { # Image limits kill flavor preferred "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "2", "hw:cpu_cores": "8", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "4", } }, "expect": ( -1, -1, -1, 65536, 4, 65536 ) }, { # Image limits cannot exceed flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": "16", } }, "expect": exception.ImageVCPULimitsRangeExceeded, }, { # Image preferred cannot exceed flavor "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_cores": "16", } }, "expect": exception.ImageVCPUTopologyRangeExceeded, }, ] for topo_test in testdata: image_meta = objects.ImageMeta.from_dict(topo_test["image"]) if type(topo_test["expect"]) == tuple: (preferred, maximum) = hw._get_cpu_topology_constraints( topo_test["flavor"], image_meta) self.assertEqual(topo_test["expect"][0], preferred.sockets) self.assertEqual(topo_test["expect"][1], preferred.cores) self.assertEqual(topo_test["expect"][2], preferred.threads) self.assertEqual(topo_test["expect"][3], maximum.sockets) self.assertEqual(topo_test["expect"][4], maximum.cores) self.assertEqual(topo_test["expect"][5], maximum.threads) else: self.assertRaises(topo_test["expect"], hw._get_cpu_topology_constraints, topo_test["flavor"], image_meta) def test_possible_topologies(self): testdata = [ { "allow_threads": True, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], [4, 1, 2], [2, 2, 2], [1, 4, 2], ] }, { "allow_threads": False, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1024, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 2, 1], [2, 4, 1], [1, 8, 1], [4, 1, 2], [2, 2, 2], [1, 4, 2], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "expect": [ [8, 1, 1], [4, 1, 2], ] }, { "allow_threads": True, "vcpus": 7, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "expect": [ [7, 1, 1], [1, 7, 1], ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 2, "maxcores": 1, "maxthreads": 1, "expect": exception.ImageVCPULimitsRangeImpossible, }, { "allow_threads": False, "vcpus": 8, "maxsockets": 2, "maxcores": 1, "maxthreads": 4, "expect": exception.ImageVCPULimitsRangeImpossible, }, ] for topo_test in testdata: if type(topo_test["expect"]) == list: actual = [] for topology in hw._get_possible_cpu_topologies( topo_test["vcpus"], objects.VirtCPUTopology( sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]): actual.append([topology.sockets, topology.cores, topology.threads]) self.assertEqual(topo_test["expect"], actual) else: self.assertRaises(topo_test["expect"], hw._get_possible_cpu_topologies, topo_test["vcpus"], objects.VirtCPUTopology( sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]) def test_sorting_topologies(self): testdata = [ { "allow_threads": True, "vcpus": 8, "maxsockets": 8, "maxcores": 8, "maxthreads": 2, "sockets": 4, "cores": 2, 
"threads": 1, "expect": [ [4, 2, 1], # score = 2 [8, 1, 1], # score = 1 [2, 4, 1], # score = 1 [1, 8, 1], # score = 1 [4, 1, 2], # score = 1 [2, 2, 2], # score = 1 [1, 4, 2], # score = 1 ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1024, "maxthreads": 2, "sockets": -1, "cores": 4, "threads": -1, "expect": [ [2, 4, 1], # score = 1 [1, 4, 2], # score = 1 [8, 1, 1], # score = 0 [4, 2, 1], # score = 0 [1, 8, 1], # score = 0 [4, 1, 2], # score = 0 [2, 2, 2], # score = 0 ] }, { "allow_threads": True, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "sockets": -1, "cores": -1, "threads": 2, "expect": [ [4, 1, 2], # score = 1 [8, 1, 1], # score = 0 ] }, { "allow_threads": False, "vcpus": 8, "maxsockets": 1024, "maxcores": 1, "maxthreads": 2, "sockets": -1, "cores": -1, "threads": 2, "expect": [ [8, 1, 1], # score = 0 ] }, ] for topo_test in testdata: actual = [] possible = hw._get_possible_cpu_topologies( topo_test["vcpus"], objects.VirtCPUTopology(sockets=topo_test["maxsockets"], cores=topo_test["maxcores"], threads=topo_test["maxthreads"]), topo_test["allow_threads"]) tops = hw._sort_possible_cpu_topologies( possible, objects.VirtCPUTopology(sockets=topo_test["sockets"], cores=topo_test["cores"], threads=topo_test["threads"])) for topology in tops: actual.append([topology.sockets, topology.cores, topology.threads]) self.assertEqual(topo_test["expect"], actual) def test_best_config(self): testdata = [ { # Flavor sets preferred topology only "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" }), "image": { "properties": {} }, "expect": [8, 2, 1], }, { # Image topology overrides flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_maxthreads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": [4, 2, 2], }, { # Image topology overrides flavor "allow_threads": False, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", "hw:cpu_maxthreads": "2", }), "image": { "properties": { "hw_cpu_sockets": "4", "hw_cpu_cores": "2", "hw_cpu_threads": "2", } }, "expect": [8, 2, 1], }, { # Partial image topology overrides flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" }), "image": { "properties": { "hw_cpu_sockets": "2" } }, "expect": [2, 8, 1], }, { # Restrict use of threads "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_threads": "1" }), "image": { "properties": {} }, "expect": [16, 1, 1] }, { # Force use of at least two sockets "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": {} }, "expect": [16, 1, 1] }, { # Image limits reduce flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_max_sockets": "8", "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), "image": { "properties": { "hw_cpu_max_sockets": 4, } }, "expect": [4, 4, 1] }, { # Image limits kill flavor preferred "allow_threads": True, "flavor": objects.Flavor(vcpus=16, memory_mb=2048, extra_specs={ "hw:cpu_sockets": "2", 
"hw:cpu_cores": "8", "hw:cpu_threads": "1", }), "image": { "properties": { "hw_cpu_max_cores": 4, } }, "expect": [16, 1, 1] }, { # NUMA needs threads, only cores requested by flavor "allow_threads": True, "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_cores": "2", }), "image": { "properties": { "hw_cpu_max_cores": 2, } }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=1024, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=2)), objects.InstanceNUMACell( id=1, cpuset=set([2, 3]), memory=1024)]), "expect": [1, 2, 2] }, { # NUMA needs threads, but more than requested by flavor - the # least amount of threads wins "allow_threads": True, "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_threads": "2", }), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 2] }, { # NUMA needs threads, but more than limit in flavor - the # least amount of threads which divides into the vcpu # count wins. So with desired 4, max of 3, and # vcpu count of 4, we should get 2 threads. "allow_threads": True, "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_max_sockets": "5", "hw:cpu_max_cores": "2", "hw:cpu_max_threads": "3", }), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 2] }, { # NUMA needs threads, but thread count does not # divide into flavor vcpu count, so we must # reduce thread count to closest divisor "allow_threads": True, "flavor": objects.Flavor(vcpus=6, memory_mb=2048, extra_specs={ }), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [2, 1, 3] }, { # NUMA needs different number of threads per cell - the least # amount of threads wins "allow_threads": True, "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={}), "image": { "properties": {} }, "numa_topology": objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=2, threads=2)), objects.InstanceNUMACell( id=1, cpuset=set([4, 5, 6, 7]), memory=1024, cpu_topology=objects.VirtCPUTopology( sockets=1, cores=1, threads=4))]), "expect": [4, 1, 2] }, ] for topo_test in testdata: image_meta = objects.ImageMeta.from_dict(topo_test["image"]) topology = hw._get_desirable_cpu_topologies( topo_test["flavor"], image_meta, topo_test["allow_threads"], topo_test.get("numa_topology"))[0] self.assertEqual(topo_test["expect"][0], topology.sockets) self.assertEqual(topo_test["expect"][1], topology.cores) self.assertEqual(topo_test["expect"][2], topology.threads) class NUMATopologyTest(test.NoDBTestCase): def test_topology_constraints(self): testdata = [ { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ }), "image": { }, "expect": None, }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2 }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( 
id=0, cpuset=set([0, 1, 2, 3]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([4, 5, 6, 7]), memory=1024), ]), }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:mem_page_size": 2048 }), "image": { }, "expect": objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048, pagesize=2048) ]), }, { # vcpus is not a multiple of nodes, so it # is an error to not provide cpu/mem mapping "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 3 }), "image": { }, "expect": exception.ImageNUMATopologyAsymmetric, }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 3, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "4,6", "hw:numa_mem.1": "512", "hw:numa_cpus.2": "5,7", "hw:numa_mem.2": "512", }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([4, 6]), memory=512), objects.InstanceNUMACell( id=2, cpuset=set([5, 7]), memory=512) ]), }, { "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ }), "image": { "properties": { "hw_numa_nodes": 3, "hw_numa_cpus.0": "0-3", "hw_numa_mem.0": "1024", "hw_numa_cpus.1": "4,6", "hw_numa_mem.1": "512", "hw_numa_cpus.2": "5,7", "hw_numa_mem.2": "512", }, }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([4, 6]), memory=512), objects.InstanceNUMACell( id=2, cpuset=set([5, 7]), memory=512) ]), }, { # Request a CPU that is out of range # wrt vCPU count "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 1, "hw:numa_cpus.0": "0-16", "hw:numa_mem.0": "2048", }), "image": { }, "expect": exception.ImageNUMATopologyCPUOutOfRange, }, { # Request the same CPU in two nodes "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-7", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "0-7", "hw:numa_mem.1": "1024", }), "image": { }, "expect": exception.ImageNUMATopologyCPUDuplicates, }, { # Request with some CPUs not assigned "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-2", "hw:numa_mem.0": "1024", "hw:numa_cpus.1": "3-4", "hw:numa_mem.1": "1024", }), "image": { }, "expect": exception.ImageNUMATopologyCPUsUnassigned, }, { # Request too little memory vs flavor total "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "512", "hw:numa_cpus.1": "4-7", "hw:numa_mem.1": "512", }), "image": { }, "expect": exception.ImageNUMATopologyMemoryOutOfRange, }, { # Request too much memory vs flavor total "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.0": "1576", "hw:numa_cpus.1": "4-7", "hw:numa_mem.1": "1576", }), "image": { }, "expect": exception.ImageNUMATopologyMemoryOutOfRange, }, { # Request missing mem.0 "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_cpus.0": "0-3", "hw:numa_mem.1": "1576", }), "image": { }, "expect": exception.ImageNUMATopologyIncomplete, }, { # Request missing cpu.0 "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:numa_mem.0": "1576", "hw:numa_cpus.1": "4-7", }), "image": { }, "expect": 
exception.ImageNUMATopologyIncomplete, }, { # Image attempts to override flavor "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, }), "image": { "properties": { "hw_numa_nodes": 4} }, "expect": exception.ImageNUMATopologyForbidden, }, { # NUMA + CPU pinning requested in the flavor "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( id=1, cpuset=set([2, 3]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # no NUMA + CPU pinning requested in the flavor "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED }), "image": { }, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # NUMA + CPU pinning requested in the image "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2 }), "image": { "properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED }}, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( id=1, cpuset=set([2, 3]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # no NUMA + CPU pinning requested in the image "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={}), "image": { "properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED }}, "expect": objects.InstanceNUMATopology(cells= [ objects.InstanceNUMACell( id=0, cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) }, { # Invalid CPU pinning override "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED }), "image": { "properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED} }, "expect": exception.ImageCPUPinningForbidden, }, { # Invalid CPU pinning policy with realtime "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED, "hw:cpu_realtime": "yes", }), "image": { "properties": {} }, "expect": exception.RealtimeConfigurationInvalid, }, { # Invalid CPU thread pinning override "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:numa_nodes": 2, "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED, "hw:cpu_thread_policy": fields.CPUThreadAllocationPolicy.ISOLATE, }), "image": { "properties": { "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED, "hw_cpu_thread_policy": fields.CPUThreadAllocationPolicy.REQUIRE, } }, "expect": exception.ImageCPUThreadPolicyForbidden, }, { # Invalid CPU pinning policy with CPU thread pinning "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED, "hw:cpu_thread_policy": fields.CPUThreadAllocationPolicy.ISOLATE, }), "image": { "properties": {} }, "expect": exception.CPUThreadPolicyConfigurationInvalid, }, { # Invalid vCPUs mask with realtime "flavor": objects.Flavor(vcpus=4, memory_mb=2048, extra_specs={ "hw:cpu_policy": "dedicated", "hw:cpu_realtime": "yes", }), "image": { "properties": {} }, 
"expect": exception.RealtimeMaskNotFoundOrInvalid, }, ] for testitem in testdata: image_meta = objects.ImageMeta.from_dict(testitem["image"]) if testitem["expect"] is None: topology = hw.numa_get_constraints( testitem["flavor"], image_meta) self.assertIsNone(topology) elif type(testitem["expect"]) == type: self.assertRaises(testitem["expect"], hw.numa_get_constraints, testitem["flavor"], image_meta) else: topology = hw.numa_get_constraints( testitem["flavor"], image_meta) self.assertIsNotNone(topology) self.assertEqual(len(testitem["expect"].cells), len(topology.cells)) for i in range(len(topology.cells)): self.assertEqual(testitem["expect"].cells[i].id, topology.cells[i].id) self.assertEqual(testitem["expect"].cells[i].cpuset, topology.cells[i].cpuset) self.assertEqual(testitem["expect"].cells[i].memory, topology.cells[i].memory) self.assertEqual(testitem["expect"].cells[i].pagesize, topology.cells[i].pagesize) self.assertEqual(testitem["expect"].cells[i].cpu_pinning, topology.cells[i].cpu_pinning) def test_host_usage_contiguous(self): hpages0_4K = objects.NUMAPagesTopology(size_kb=4, total=256, used=0) hpages0_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=1) hpages1_4K = objects.NUMAPagesTopology(size_kb=4, total=128, used=2) hpages1_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=3) hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[ hpages0_4K, hpages0_2M], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512, cpu_usage=0, memory_usage=0, mempages=[ hpages1_4K, hpages1_2M], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([5, 7]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) instance1 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256), objects.InstanceNUMACell(id=1, cpuset=set([4]), memory=256), ]) instance2 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256), objects.InstanceNUMACell(id=1, cpuset=set([5, 7]), memory=256), ]) hostusage = hw.numa_usage_from_instances( hosttopo, [instance1, instance2]) self.assertEqual(len(hosttopo), len(hostusage)) self.assertIsInstance(hostusage.cells[0], objects.NUMACell) self.assertEqual(hosttopo.cells[0].cpuset, hostusage.cells[0].cpuset) self.assertEqual(hosttopo.cells[0].memory, hostusage.cells[0].memory) self.assertEqual(hostusage.cells[0].cpu_usage, 5) self.assertEqual(hostusage.cells[0].memory_usage, 512) self.assertEqual(hostusage.cells[0].mempages, [ hpages0_4K, hpages0_2M]) self.assertIsInstance(hostusage.cells[1], objects.NUMACell) self.assertEqual(hosttopo.cells[1].cpuset, hostusage.cells[1].cpuset) self.assertEqual(hosttopo.cells[1].memory, hostusage.cells[1].memory) self.assertEqual(hostusage.cells[1].cpu_usage, 3) self.assertEqual(hostusage.cells[1].memory_usage, 512) self.assertEqual(hostusage.cells[1].mempages, [ hpages1_4K, hpages1_2M]) self.assertEqual(256, hpages0_4K.total) self.assertEqual(0, hpages0_4K.used) self.assertEqual(0, hpages0_2M.total) self.assertEqual(1, hpages0_2M.used) self.assertIsInstance(hostusage.cells[2], objects.NUMACell) self.assertEqual(hosttopo.cells[2].cpuset, hostusage.cells[2].cpuset) self.assertEqual(hosttopo.cells[2].memory, hostusage.cells[2].memory) self.assertEqual(hostusage.cells[2].cpu_usage, 0) self.assertEqual(hostusage.cells[2].memory_usage, 0) self.assertEqual(128, 
hpages1_4K.total) self.assertEqual(2, hpages1_4K.used) self.assertEqual(0, hpages1_2M.total) self.assertEqual(3, hpages1_2M.used) def test_host_usage_sparse(self): hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=5, cpuset=set([4, 6]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=6, cpuset=set([5, 7]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) instance1 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256), objects.InstanceNUMACell(id=6, cpuset=set([4]), memory=256), ]) instance2 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256, cpu_usage=0, memory_usage=0, mempages=[]), objects.InstanceNUMACell(id=5, cpuset=set([5, 7]), memory=256, cpu_usage=0, memory_usage=0, mempages=[]), ]) hostusage = hw.numa_usage_from_instances( hosttopo, [instance1, instance2]) self.assertEqual(len(hosttopo), len(hostusage)) self.assertIsInstance(hostusage.cells[0], objects.NUMACell) self.assertEqual(hosttopo.cells[0].id, hostusage.cells[0].id) self.assertEqual(hosttopo.cells[0].cpuset, hostusage.cells[0].cpuset) self.assertEqual(hosttopo.cells[0].memory, hostusage.cells[0].memory) self.assertEqual(hostusage.cells[0].cpu_usage, 5) self.assertEqual(hostusage.cells[0].memory_usage, 512) self.assertIsInstance(hostusage.cells[1], objects.NUMACell) self.assertEqual(hosttopo.cells[1].id, hostusage.cells[1].id) self.assertEqual(hosttopo.cells[1].cpuset, hostusage.cells[1].cpuset) self.assertEqual(hosttopo.cells[1].memory, hostusage.cells[1].memory) self.assertEqual(hostusage.cells[1].cpu_usage, 2) self.assertEqual(hostusage.cells[1].memory_usage, 256) self.assertIsInstance(hostusage.cells[2], objects.NUMACell) self.assertEqual(hosttopo.cells[2].cpuset, hostusage.cells[2].cpuset) self.assertEqual(hosttopo.cells[2].memory, hostusage.cells[2].memory) self.assertEqual(hostusage.cells[2].cpu_usage, 1) self.assertEqual(hostusage.cells[2].memory_usage, 256) def test_host_usage_culmulative_with_free(self): hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024, cpu_usage=2, memory_usage=512, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512, cpu_usage=1, memory_usage=512, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([5, 7]), memory=256, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) instance1 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=512), objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=256), objects.InstanceNUMACell(id=2, cpuset=set([4]), memory=256)]) hostusage = hw.numa_usage_from_instances( hosttopo, [instance1]) self.assertIsInstance(hostusage.cells[0], objects.NUMACell) self.assertEqual(hostusage.cells[0].cpu_usage, 5) self.assertEqual(hostusage.cells[0].memory_usage, 1024) self.assertIsInstance(hostusage.cells[1], objects.NUMACell) self.assertEqual(hostusage.cells[1].cpu_usage, 2) self.assertEqual(hostusage.cells[1].memory_usage, 768) self.assertIsInstance(hostusage.cells[2], objects.NUMACell) self.assertEqual(hostusage.cells[2].cpu_usage, 1) self.assertEqual(hostusage.cells[2].memory_usage, 256) # Test freeing of resources hostusage 
= hw.numa_usage_from_instances( hostusage, [instance1], free=True) self.assertEqual(hostusage.cells[0].cpu_usage, 2) self.assertEqual(hostusage.cells[0].memory_usage, 512) self.assertEqual(hostusage.cells[1].cpu_usage, 1) self.assertEqual(hostusage.cells[1].memory_usage, 512) self.assertEqual(hostusage.cells[2].cpu_usage, 0) self.assertEqual(hostusage.cells[2].memory_usage, 0) def test_topo_usage_none(self): hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) instance1 = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256), objects.InstanceNUMACell(id=2, cpuset=set([2]), memory=256), ]) hostusage = hw.numa_usage_from_instances( None, [instance1]) self.assertIsNone(hostusage) hostusage = hw.numa_usage_from_instances( hosttopo, []) self.assertEqual(hostusage.cells[0].cpu_usage, 0) self.assertEqual(hostusage.cells[0].memory_usage, 0) self.assertEqual(hostusage.cells[1].cpu_usage, 0) self.assertEqual(hostusage.cells[1].memory_usage, 0) hostusage = hw.numa_usage_from_instances( hosttopo, None) self.assertEqual(hostusage.cells[0].cpu_usage, 0) self.assertEqual(hostusage.cells[0].memory_usage, 0) self.assertEqual(hostusage.cells[1].cpu_usage, 0) self.assertEqual(hostusage.cells[1].memory_usage, 0) def assertNUMACellMatches(self, expected_cell, got_cell): attrs = ('cpuset', 'memory', 'id') if isinstance(expected_cell, objects.NUMATopology): attrs += ('cpu_usage', 'memory_usage') for attr in attrs: self.assertEqual(getattr(expected_cell, attr), getattr(got_cell, attr)) def test_json(self): expected = objects.NUMATopology( cells=[ objects.NUMACell(id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) got = objects.NUMATopology.obj_from_db_obj(expected._to_json()) for exp_cell, got_cell in zip(expected.cells, got.cells): self.assertNUMACellMatches(exp_cell, got_cell) class VirtNUMATopologyCellUsageTestCase(test.NoDBTestCase): def test_fit_instance_cell_success_no_limit(self): host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=1024) fitted_cell = hw._numa_fit_instance_cell(host_cell, instance_cell) self.assertIsInstance(fitted_cell, objects.InstanceNUMACell) self.assertEqual(host_cell.id, fitted_cell.id) def test_fit_instance_cell_success_w_limit(self): host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024, cpu_usage=2, memory_usage=1024, mempages=[], siblings=[], pinned_cpus=set([])) limit_cell = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=1024) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsInstance(fitted_cell, objects.InstanceNUMACell) self.assertEqual(host_cell.id, fitted_cell.id) def test_fit_instance_cell_self_overcommit(self): host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], 
pinned_cpus=set([])) limit_cell = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2, 3]), memory=4096) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsNone(fitted_cell) def test_fit_instance_cell_fail_w_limit(self): host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024, cpu_usage=2, memory_usage=1024, mempages=[], siblings=[], pinned_cpus=set([])) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=4096) limit_cell = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsNone(fitted_cell) instance_cell = objects.InstanceNUMACell( id=0, cpuset=set([1, 2, 3, 4, 5]), memory=1024) fitted_cell = hw._numa_fit_instance_cell( host_cell, instance_cell, limit_cell=limit_cell) self.assertIsNone(fitted_cell) class VirtNUMAHostTopologyTestCase(test.NoDBTestCase): def setUp(self): super(VirtNUMAHostTopologyTestCase, self).setUp() self.host = objects.NUMATopology( cells=[ objects.NUMACell(id=1, cpuset=set([1, 2]), memory=2048, cpu_usage=2, memory_usage=2048, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=2, cpuset=set([3, 4]), memory=2048, cpu_usage=2, memory_usage=2048, mempages=[], siblings=[], pinned_cpus=set([]))]) self.limits = objects.NUMATopologyLimits( cpu_allocation_ratio=2, ram_allocation_ratio=2) self.instance1 = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=2048)]) self.instance2 = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2, 3, 4]), memory=1024)]) self.instance3 = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=1024)]) def test_get_fitting_success_no_limits(self): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) self.host = hw.numa_usage_from_instances(self.host, [fitted_instance1]) fitted_instance2 = hw.numa_fit_instance_to_host( self.host, self.instance3) self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology) def test_get_fitting_success_limits(self): fitted_instance = hw.numa_fit_instance_to_host( self.host, self.instance3, self.limits) self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology) self.assertEqual(1, fitted_instance.cells[0].id) def test_get_fitting_fails_no_limits(self): fitted_instance = hw.numa_fit_instance_to_host( self.host, self.instance2, self.limits) self.assertIsNone(fitted_instance) def test_get_fitting_culmulative_fails_limits(self): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1, self.limits) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) self.assertEqual(1, fitted_instance1.cells[0].id) self.host = hw.numa_usage_from_instances(self.host, [fitted_instance1]) fitted_instance2 = hw.numa_fit_instance_to_host( self.host, self.instance2, self.limits) self.assertIsNone(fitted_instance2) def test_get_fitting_culmulative_success_limits(self): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1, self.limits) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) self.assertEqual(1, fitted_instance1.cells[0].id) self.host = hw.numa_usage_from_instances(self.host, [fitted_instance1]) fitted_instance2 = 
hw.numa_fit_instance_to_host( self.host, self.instance3, self.limits) self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology) self.assertEqual(2, fitted_instance2.cells[0].id) def test_get_fitting_pci_success(self): pci_request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) pci_reqs = [pci_request] pci_stats = stats.PciDeviceStats() with mock.patch.object(stats.PciDeviceStats, 'support_requests', return_value= True): fitted_instance1 = hw.numa_fit_instance_to_host(self.host, self.instance1, pci_requests=pci_reqs, pci_stats=pci_stats) self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology) def test_get_fitting_pci_fail(self): pci_request = objects.InstancePCIRequest(count=1, spec=[{'vendor_id': '8086'}]) pci_reqs = [pci_request] pci_stats = stats.PciDeviceStats() with mock.patch.object(stats.PciDeviceStats, 'support_requests', return_value= False): fitted_instance1 = hw.numa_fit_instance_to_host( self.host, self.instance1, pci_requests=pci_reqs, pci_stats=pci_stats) self.assertIsNone(fitted_instance1) class NumberOfSerialPortsTest(test.NoDBTestCase): def test_flavor(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={"hw:serial_port_count": 3}) image_meta = objects.ImageMeta.from_dict({}) num_ports = hw.get_number_of_serial_ports(flavor, image_meta) self.assertEqual(3, num_ports) def test_image_meta(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={}) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_serial_port_count": 2}}) num_ports = hw.get_number_of_serial_ports(flavor, image_meta) self.assertEqual(2, num_ports) def test_flavor_invalid_value(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={"hw:serial_port_count": 'foo'}) image_meta = objects.ImageMeta.from_dict({}) self.assertRaises(exception.ImageSerialPortNumberInvalid, hw.get_number_of_serial_ports, flavor, image_meta) def test_image_meta_smaller_than_flavor(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={"hw:serial_port_count": 3}) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_serial_port_count": 2}}) num_ports = hw.get_number_of_serial_ports(flavor, image_meta) self.assertEqual(2, num_ports) def test_flavor_smaller_than_image_meta(self): flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={"hw:serial_port_count": 3}) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_serial_port_count": 4}}) self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue, hw.get_number_of_serial_ports, flavor, image_meta) class HelperMethodsTestCase(test.NoDBTestCase): def setUp(self): super(HelperMethodsTestCase, self).setUp() self.hosttopo = objects.NUMATopology(cells=[ objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512, memory_usage=0, cpu_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512, memory_usage=0, cpu_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), ]) self.instancetopo = objects.InstanceNUMATopology( instance_uuid='fake-uuid', cells=[ objects.InstanceNUMACell( id=0, cpuset=set([0, 1]), memory=256, pagesize=2048, cpu_pinning={0: 0, 1: 1}), objects.InstanceNUMACell( id=1, cpuset=set([2]), memory=256, pagesize=2048, cpu_pinning={2: 3}), ]) self.context = context.RequestContext('fake-user', 'fake-project') def _check_usage(self, host_usage): self.assertEqual(2, host_usage.cells[0].cpu_usage) self.assertEqual(256, host_usage.cells[0].memory_usage) self.assertEqual(1, host_usage.cells[1].cpu_usage) 
self.assertEqual(256, host_usage.cells[1].memory_usage) def test_dicts_json(self): host = {'numa_topology': self.hosttopo._to_json()} instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_dicts_instance_json(self): host = {'numa_topology': self.hosttopo} instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, objects.NUMATopology) self._check_usage(res) def test_dicts_instance_json_old(self): host = {'numa_topology': self.hosttopo} instance = {'numa_topology': jsonutils.dumps(self.instancetopo._to_dict())} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, objects.NUMATopology) self._check_usage(res) def test_dicts_host_json(self): host = {'numa_topology': self.hosttopo._to_json()} instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_dicts_host_json_old(self): host = {'numa_topology': jsonutils.dumps( self.hosttopo._to_dict())} instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_object_host_instance_json(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_object_host_instance(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_instance_with_fetch(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) fake_uuid = str(uuid.uuid4()) instance = {'uuid': fake_uuid} with mock.patch.object(objects.InstanceNUMATopology, 'get_by_instance_uuid', return_value=None) as get_mock: res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self.assertTrue(get_mock.called) def test_object_instance_with_load(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) fake_uuid = str(uuid.uuid4()) instance = objects.Instance(context=self.context, uuid=fake_uuid) with mock.patch.object(objects.InstanceNUMATopology, 'get_by_instance_uuid', return_value=None) as get_mock: res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self.assertTrue(get_mock.called) def test_instance_serialized_by_build_request_spec(self): host = objects.ComputeNode(numa_topology=self.hosttopo._to_json()) fake_uuid = str(uuid.uuid4()) instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid, numa_topology=self.instancetopo) # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec # We can remove this test once we no longer use that method. 
instance_raw = jsonutils.to_primitive( base_obj.obj_to_primitive(instance)) res = hw.get_host_numa_usage_from_instance(host, instance_raw) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_attr_host(self): class Host(object): def __init__(obj): obj.numa_topology = self.hosttopo._to_json() host = Host() instance = {'numa_topology': self.instancetopo._to_json()} res = hw.get_host_numa_usage_from_instance(host, instance) self.assertIsInstance(res, six.string_types) self._check_usage(objects.NUMATopology.obj_from_db_obj(res)) def test_never_serialize_result(self): host = {'numa_topology': self.hosttopo._to_json()} instance = {'numa_topology': self.instancetopo} res = hw.get_host_numa_usage_from_instance(host, instance, never_serialize_result=True) self.assertIsInstance(res, objects.NUMATopology) self._check_usage(res) def test_dict_numa_topology_to_obj(self): fake_uuid = str(uuid.uuid4()) instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid, numa_topology=self.instancetopo) instance_dict = base_obj.obj_to_primitive(instance) instance_numa_topo = hw.instance_topology_from_instance(instance_dict) for expected_cell, actual_cell in zip(self.instancetopo.cells, instance_numa_topo.cells): for k in expected_cell.fields: self.assertEqual(getattr(expected_cell, k), getattr(actual_cell, k)) class VirtMemoryPagesTestCase(test.NoDBTestCase): def test_cell_instance_pagesize(self): cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=2048) self.assertEqual(0, cell.id) self.assertEqual(set([0]), cell.cpuset) self.assertEqual(1024, cell.memory) self.assertEqual(2048, cell.pagesize) def test_numa_pagesize_usage_from_cell(self): instcell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=512, pagesize=2048) hostcell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[objects.NUMAPagesTopology( size_kb=2048, total=512, used=0)], siblings=[], pinned_cpus=set([])) topo = hw._numa_pagesize_usage_from_cell(hostcell, instcell, 1) self.assertEqual(2048, topo[0].size_kb) self.assertEqual(512, topo[0].total) self.assertEqual(256, topo[0].used) def _test_get_requested_mempages_pagesize(self, spec=None, props=None): flavor = objects.Flavor(vcpus=16, memory_mb=2048, extra_specs=spec or {}) image_meta = objects.ImageMeta.from_dict({"properties": props or {}}) return hw._numa_get_pagesize_constraints(flavor, image_meta) def test_get_requested_mempages_pagesize_from_flavor_swipe(self): self.assertEqual( hw.MEMPAGES_SMALL, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "small"})) self.assertEqual( hw.MEMPAGES_LARGE, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "large"})) self.assertEqual( hw.MEMPAGES_ANY, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "any"})) def test_get_requested_mempages_pagesize_from_flavor_specific(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "2048"})) def test_get_requested_mempages_pagesize_from_flavor_invalid(self): self.assertRaises( exception.MemoryPageSizeInvalid, self._test_get_requested_mempages_pagesize, {"hw:mem_page_size": "foo"}) self.assertRaises( exception.MemoryPageSizeInvalid, self._test_get_requested_mempages_pagesize, {"hw:mem_page_size": "-42"}) def test_get_requested_mempages_pagesizes_from_flavor_suffix_sweep(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( 
spec={"hw:mem_page_size": "2048KB"})) self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "2MB"})) self.assertEqual( 1048576, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "1GB"})) def test_get_requested_mempages_pagesize_from_image_flavor_any(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "any"}, props={"hw_mem_page_size": "2048"})) def test_get_requested_mempages_pagesize_from_image_flavor_large(self): self.assertEqual( 2048, self._test_get_requested_mempages_pagesize( spec={"hw:mem_page_size": "large"}, props={"hw_mem_page_size": "2048"})) def test_get_requested_mempages_pagesize_from_image_forbidden(self): self.assertRaises( exception.MemoryPageSizeForbidden, self._test_get_requested_mempages_pagesize, {"hw:mem_page_size": "small"}, {"hw_mem_page_size": "2048"}) def test_get_requested_mempages_pagesize_from_image_forbidden2(self): self.assertRaises( exception.MemoryPageSizeForbidden, self._test_get_requested_mempages_pagesize, {}, {"hw_mem_page_size": "2048"}) def test_cell_accepts_request_wipe(self): host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=262144, used=0), ], siblings=[], pinned_cpus=set([])) inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_SMALL) self.assertEqual( 4, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_ANY) self.assertEqual( 4, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE) self.assertIsNone(hw._numa_cell_supports_pagesize_request( host_cell, inst_cell)) def test_cell_accepts_request_large_pass(self): inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE) host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) self.assertEqual( 2048, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) def test_cell_accepts_request_custom_pass(self): inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=2048) host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) self.assertEqual( 2048, hw._numa_cell_supports_pagesize_request(host_cell, inst_cell)) def test_cell_accepts_request_remainder_memory(self): # Test memory can't be divided with no rem by mempage's size_kb inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024 + 1, pagesize=2048) host_cell = objects.NUMACell( id=0, cpuset=set([0]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) self.assertIsNone(hw._numa_cell_supports_pagesize_request( host_cell, inst_cell)) def test_cell_accepts_request_host_mempages(self): # Test pagesize not in host's mempages inst_cell = objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=4096) host_cell = objects.NUMACell( id=0, cpuset=set([0]), 
memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=256, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0) ], siblings=[], pinned_cpus=set([])) self.assertRaises(exception.MemoryPageSizeNotSupported, hw._numa_cell_supports_pagesize_request, host_cell, inst_cell) class _CPUPinningTestCaseBase(object): def assertEqualTopology(self, expected, got): for attr in ('sockets', 'cores', 'threads'): self.assertEqual(getattr(expected, attr), getattr(got, attr), "Mismatch on %s" % attr) def assertInstanceCellPinned(self, instance_cell, cell_ids=None): default_cell_id = 0 self.assertIsNotNone(instance_cell) if cell_ids is None: self.assertEqual(default_cell_id, instance_cell.id) else: self.assertIn(instance_cell.id, cell_ids) self.assertEqual(len(instance_cell.cpuset), len(instance_cell.cpu_pinning)) def assertPinningPreferThreads(self, instance_cell, host_cell): """Make sure we are preferring threads. We do this by assessing that at least 2 CPUs went to the same core if that was even possible to begin with. """ max_free_siblings = max(map(len, host_cell.free_siblings)) if len(instance_cell) > 1 and max_free_siblings > 1: cpu_to_sib = {} for sib in host_cell.free_siblings: for cpu in sib: cpu_to_sib[cpu] = tuple(sorted(sib)) pins_per_sib = collections.defaultdict(int) for inst_p, host_p in instance_cell.cpu_pinning.items(): pins_per_sib[cpu_to_sib[host_p]] += 1 self.assertTrue(max(pins_per_sib.values()) > 1, "Seems threads were not preferred by the pinning " "logic.") class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase): def test_get_pinning_inst_too_large_cpu(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_inst_too_large_mem(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]), memory=2048, memory_usage=1024, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_inst_not_avail(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, pinned_cpus=set([0]), siblings=[], mempages=[]) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_no_sibling_fits_empty(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 3)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_no_sibling_fits_w_usage(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, pinned_cpus=set([1]), mempages=[], siblings=[]) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=1024) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, 
inst_pin) self.assertInstanceCellPinned(inst_pin) got_pinning = {0: 0, 1: 2, 2: 3} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_fits(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_host_siblings_fits_empty(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_host_siblings_fits_empty_2(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 8)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([1, 2, 5, 6]), siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {0: 0, 1: 3, 2: 4, 3: 7} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_host_siblings_fit_single_core(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_host_siblings_fit(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], 
pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) got_pinning = {x: x for x in range(0, 4)} self.assertEqual(got_pinning, inst_pin.cpu_pinning) def test_get_pinning_require_policy_too_few_siblings(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([0, 1, 2]), siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_require_policy_fits(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_require_policy_fits_w_usage(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([0, 1]), siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_fit_optimize_threads(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4, 5]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=2) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit_w_usage(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 
6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([0, 2, 5])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_instance_odd_fit_orphan_only(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])], mempages=[], pinned_cpus=set([0, 2, 5, 6])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_host_siblings_large_instance_odd_fit(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), memory=4096, memory_usage=0, siblings=[set([0, 8]), set([1, 9]), set([2, 10]), set([3, 11]), set([4, 12]), set([5, 13]), set([6, 14]), set([7, 15])], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]), memory=2048) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) self.assertPinningPreferThreads(inst_pin, host_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_too_few_fully_free_cores(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([1])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_isolate_policy_no_fully_free_cores(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], mempages=[], pinned_cpus=set([1, 2])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertIsNone(inst_pin) def test_get_pinning_isolate_policy_fits(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_fits_ht_host(self): host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[set([0, 1]), set([2, 3])], 
mempages=[], pinned_cpus=set([])) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) def test_get_pinning_isolate_policy_fits_w_usage(self): host_pin = objects.NUMACell( id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=4096, memory_usage=0, pinned_cpus=set([0, 1]), siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])], mempages=[]) inst_pin = objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE) inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin) self.assertInstanceCellPinned(inst_pin) got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1) self.assertEqualTopology(got_topo, inst_pin.cpu_topology) class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase): def test_host_numa_fit_instance_to_host_single_cell(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))] ) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 1)) def test_host_numa_fit_instance_to_host_single_cell_w_usage(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), pinned_cpus=set([0]), memory=2048, memory_usage=0, siblings=[], mempages=[]), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(1,)) def test_host_numa_fit_instance_to_host_single_cell_fail(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048, pinned_cpus=set([0]), memory_usage=0, siblings=[], mempages=[]), objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048, pinned_cpus=set([2]), memory_usage=0, siblings=[], mempages=[])]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_host_numa_fit_instance_to_host_fit(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, 
cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 1)) def test_host_numa_fit_instance_to_host_barely_fit(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=2048, pinned_cpus=set([0]), siblings=[], mempages=[], memory_usage=0), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([4, 5, 6])), objects.NUMACell(id=2, cpuset=set([8, 9, 10, 11]), memory=2048, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([10, 11]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) for cell in inst_topo.cells: self.assertInstanceCellPinned(cell, cell_ids=(0, 2)) def test_host_numa_fit_instance_to_host_fail_capacity(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([0])), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([4, 5, 6]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=2048, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_host_numa_fit_instance_to_host_fail_topology(self): host_topo = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([])), objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]), memory=4096, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_topo = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([2, 3]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED), objects.InstanceNUMACell( cpuset=set([4, 5]), memory=1024, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo) self.assertIsNone(inst_topo) def test_cpu_pinning_usage_from_instances(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = objects.InstanceNUMATopology( cells = [objects.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 1, 1: 2}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) host_pin = hw.numa_usage_from_instances( host_pin, [inst_pin_1, inst_pin_2]) self.assertEqual(set([0, 1, 2, 3]), host_pin.cells[0].pinned_cpus) def 
test_cpu_pinning_usage_from_instances_free(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([0, 1, 3]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0]), memory=1024, cpu_pinning={0: 1}, id=0, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=1024, id=0, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) host_pin = hw.numa_usage_from_instances( host_pin, [inst_pin_1, inst_pin_2], free=True) self.assertEqual(set(), host_pin.cells[0].pinned_cpus) def test_host_usage_from_instances_fail(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 3}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) inst_pin_2 = objects.InstanceNUMATopology( cells = [objects.InstanceNUMACell( cpuset=set([0, 1]), id=0, memory=2048, cpu_pinning={0: 0, 1: 2}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]) self.assertRaises(exception.CPUPinningInvalid, hw.numa_usage_from_instances, host_pin, [inst_pin_1, inst_pin_2]) def test_host_usage_from_instances_isolate(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=0, memory_usage=0, siblings=[set([0, 2]), set([1, 3])], mempages=[], pinned_cpus=set([]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 1}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE )]) new_cell = hw.numa_usage_from_instances(host_pin, [inst_pin_1]) self.assertEqual(host_pin.cells[0].cpuset, new_cell.cells[0].pinned_cpus) self.assertEqual(new_cell.cells[0].cpu_usage, 4) def test_host_usage_from_instances_isolate_free(self): host_pin = objects.NUMATopology( cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=4096, cpu_usage=4, memory_usage=0, siblings=[set([0, 2]), set([1, 3])], mempages=[], pinned_cpus=set([0, 1, 2, 3]))]) inst_pin_1 = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( cpuset=set([0, 1]), memory=2048, id=0, cpu_pinning={0: 0, 1: 1}, cpu_policy=fields.CPUAllocationPolicy.DEDICATED, cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE )]) new_cell = hw.numa_usage_from_instances(host_pin, [inst_pin_1], free=True) self.assertEqual(set([]), new_cell.cells[0].pinned_cpus) self.assertEqual(new_cell.cells[0].cpu_usage, 0) class CPURealtimeTestCase(test.NoDBTestCase): def test_success_flavor(self): flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^1"}} image = objects.ImageMeta.from_dict({}) rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image) self.assertEqual(set([0, 2]), rt) self.assertEqual(set([1]), em) def test_success_image(self): flavor = {"extra_specs": {}} image = objects.ImageMeta.from_dict( {"properties": {"hw_cpu_realtime_mask": "^0-1"}}) rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image) self.assertEqual(set([2]), rt) self.assertEqual(set([0, 1]), em) def test_no_mask_configured(self): flavor = {"extra_specs": {}} image = 
objects.ImageMeta.from_dict({"properties": {}}) self.assertRaises( exception.RealtimeMaskNotFoundOrInvalid, hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image) def test_mask_badly_configured(self): flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^0-2"}} image = objects.ImageMeta.from_dict({"properties": {}}) self.assertRaises( exception.RealtimeMaskNotFoundOrInvalid, hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image) nova-13.1.4/nova/tests/unit/virt/test_virt.py0000664000567000056710000002541113064447152022362 0ustar jenkinsjenkins00000000000000# Copyright 2011 Isaku Yamahata # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import mock import six from nova import test from nova import utils from nova.virt.disk import api as disk_api from nova.virt.disk.mount import api as mount from nova.virt import driver PROC_MOUNTS_CONTENTS = """rootfs / rootfs rw 0 0 sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 udev /dev devtmpfs rw,relatime,size=1013160k,nr_inodes=253290,mode=755 0 0 devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620 0 0 tmpfs /run tmpfs rw,nosuid,relatime,size=408904k,mode=755 0 0""" class TestVirtDriver(test.NoDBTestCase): def test_block_device(self): swap = {'device_name': '/dev/sdb', 'swap_size': 1} ephemerals = [{'num': 0, 'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1', 'size': 1}] block_device_mapping = [{'mount_device': '/dev/sde', 'device_path': 'fake_device'}] block_device_info = { 'root_device_name': '/dev/sda', 'swap': swap, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} empty_block_device_info = {} self.assertEqual( driver.block_device_info_get_root(block_device_info), '/dev/sda') self.assertIsNone( driver.block_device_info_get_root(empty_block_device_info)) self.assertIsNone(driver.block_device_info_get_root(None)) self.assertEqual( driver.block_device_info_get_swap(block_device_info), swap) self.assertIsNone(driver.block_device_info_get_swap( empty_block_device_info)['device_name']) self.assertEqual(driver.block_device_info_get_swap( empty_block_device_info)['swap_size'], 0) self.assertIsNone( driver.block_device_info_get_swap({'swap': None})['device_name']) self.assertEqual( driver.block_device_info_get_swap({'swap': None})['swap_size'], 0) self.assertIsNone( driver.block_device_info_get_swap(None)['device_name']) self.assertEqual( driver.block_device_info_get_swap(None)['swap_size'], 0) self.assertEqual( driver.block_device_info_get_ephemerals(block_device_info), ephemerals) self.assertEqual( driver.block_device_info_get_ephemerals(empty_block_device_info), []) self.assertEqual( driver.block_device_info_get_ephemerals(None), []) def test_swap_is_usable(self): self.assertFalse(driver.swap_is_usable(None)) self.assertFalse(driver.swap_is_usable({'device_name': None})) self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb', 'swap_size': 0})) self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb', 'swap_size': 
1})) class FakeMount(object): def __init__(self, image, mount_dir, partition=None, device=None): self.image = image self.partition = partition self.mount_dir = mount_dir self.linked = self.mapped = self.mounted = False self.device = device def do_mount(self): self.linked = True self.mapped = True self.mounted = True self.device = '/dev/fake' return True def do_umount(self): self.linked = True self.mounted = False def do_teardown(self): self.linked = False self.mapped = False self.mounted = False self.device = None class TestDiskImage(test.NoDBTestCase): def mock_proc_mounts(self, mock_open): response = io.StringIO(six.text_type(PROC_MOUNTS_CONTENTS)) mock_open.return_value = response @mock.patch.object(six.moves.builtins, 'open') def test_mount(self, mock_open): self.mock_proc_mounts(mock_open) image = '/tmp/fake-image' mountdir = '/mnt/fake_rootfs' fakemount = FakeMount(image, mountdir, None) def fake_instance_for_format(image, mountdir, partition): return fakemount self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir) dev = diskimage.mount() self.assertEqual(diskimage._mounter, fakemount) self.assertEqual(dev, '/dev/fake') @mock.patch.object(six.moves.builtins, 'open') def test_umount(self, mock_open): self.mock_proc_mounts(mock_open) image = '/tmp/fake-image' mountdir = '/mnt/fake_rootfs' fakemount = FakeMount(image, mountdir, None) def fake_instance_for_format(image, mountdir, partition): return fakemount self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir) dev = diskimage.mount() self.assertEqual(diskimage._mounter, fakemount) self.assertEqual(dev, '/dev/fake') diskimage.umount() self.assertIsNone(diskimage._mounter) @mock.patch.object(six.moves.builtins, 'open') def test_teardown(self, mock_open): self.mock_proc_mounts(mock_open) image = '/tmp/fake-image' mountdir = '/mnt/fake_rootfs' fakemount = FakeMount(image, mountdir, None) def fake_instance_for_format(image, mountdir, partition): return fakemount self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir) dev = diskimage.mount() self.assertEqual(diskimage._mounter, fakemount) self.assertEqual(dev, '/dev/fake') diskimage.teardown() self.assertIsNone(diskimage._mounter) class TestVirtDisk(test.NoDBTestCase): def setUp(self): super(TestVirtDisk, self).setUp() self.executes = [] def fake_execute(*cmd, **kwargs): self.executes.append(cmd) return None, None self.stubs.Set(utils, 'execute', fake_execute) def test_lxc_setup_container(self): image = '/tmp/fake-image' container_dir = '/mnt/fake_rootfs/' def proc_mounts(self, mount_point): return None def fake_instance_for_format(image, mountdir, partition): return FakeMount(image, mountdir, partition) self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts) self.stubs.Set(mount.Mount, 'instance_for_format', staticmethod(fake_instance_for_format)) self.assertEqual(disk_api.setup_container(image, container_dir), '/dev/fake') def test_lxc_teardown_container(self): def proc_mounts(self, mount_point): mount_points = { '/mnt/loop/nopart': '/dev/loop0', '/mnt/loop/part': '/dev/mapper/loop0p1', '/mnt/nbd/nopart': '/dev/nbd15', '/mnt/nbd/part': '/dev/mapper/nbd15p1', } return mount_points[mount_point] self.stub_out('os.path.exists', lambda _: 
True) self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts) expected_commands = [] disk_api.teardown_container('/mnt/loop/nopart') expected_commands += [ ('umount', '/dev/loop0'), ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/loop/part') expected_commands += [ ('umount', '/dev/mapper/loop0p1'), ('kpartx', '-d', '/dev/loop0'), ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/nbd/nopart') expected_commands += [ ('blockdev', '--flushbufs', '/dev/nbd15'), ('umount', '/dev/nbd15'), ('qemu-nbd', '-d', '/dev/nbd15'), ] disk_api.teardown_container('/mnt/nbd/part') expected_commands += [ ('blockdev', '--flushbufs', '/dev/nbd15'), ('umount', '/dev/mapper/nbd15p1'), ('kpartx', '-d', '/dev/nbd15'), ('qemu-nbd', '-d', '/dev/nbd15'), ] # NOTE(thomasem): Not adding any commands in this case, because we're # not expecting an additional umount for LocalBlockImages. This is to # assert that no additional commands are run in this case. disk_api.teardown_container('/dev/volume-group/uuid_disk') self.assertEqual(self.executes, expected_commands) def test_lxc_teardown_container_with_namespace_cleaned(self): def proc_mounts(self, mount_point): return None self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts) expected_commands = [] disk_api.teardown_container('/mnt/loop/nopart', '/dev/loop0') expected_commands += [ ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/loop/part', '/dev/loop0') expected_commands += [ ('losetup', '--detach', '/dev/loop0'), ] disk_api.teardown_container('/mnt/nbd/nopart', '/dev/nbd15') expected_commands += [ ('qemu-nbd', '-d', '/dev/nbd15'), ] disk_api.teardown_container('/mnt/nbd/part', '/dev/nbd15') expected_commands += [ ('qemu-nbd', '-d', '/dev/nbd15'), ] self.assertEqual(self.executes, expected_commands) nova-13.1.4/nova/tests/unit/virt/test_events.py0000664000567000056710000000223213064447140022673 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from nova import test from nova.virt import event class TestEvents(test.NoDBTestCase): def test_event_repr(self): t = time.time() uuid = '1234' lifecycle = event.EVENT_LIFECYCLE_RESUMED e = event.Event(t) self.assertEqual(str(e), "" % t) e = event.InstanceEvent(uuid, timestamp=t) self.assertEqual(str(e), "" % (t, uuid)) e = event.LifecycleEvent(uuid, lifecycle, timestamp=t) self.assertEqual(str(e), " Resumed>" % (t, uuid)) nova-13.1.4/nova/tests/unit/virt/libvirt/0000775000567000056710000000000013064447471021441 5ustar jenkinsjenkins00000000000000nova-13.1.4/nova/tests/unit/virt/libvirt/test_firewall.py0000664000567000056710000007726413064447152024673 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import uuid from xml.dom import minidom from eventlet import greenthread from lxml import etree import mock from mox3 import mox from oslo_concurrency.fixture import lockutils as lock_fixture from nova.compute import utils as compute_utils from nova import exception from nova.network import linux_net from nova import objects from nova import test from nova.tests.unit import fake_network from nova.tests.unit.virt.libvirt import fakelibvirt from nova.virt.libvirt import firewall from nova.virt.libvirt import host from nova.virt import netutils _fake_network_info = fake_network.fake_get_instance_nw_info _fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info _ipv4_like = fake_network.ipv4_like class NWFilterFakes(object): def __init__(self): self.filters = {} def nwfilterLookupByName(self, name): if name in self.filters: return self.filters[name] raise fakelibvirt.libvirtError('Filter Not Found') def filterDefineXMLMock(self, xml): class FakeNWFilterInternal(object): def __init__(self, parent, name, u, xml): self.name = name self.uuid = u self.parent = parent self.xml = xml def XMLDesc(self, flags): return self.xml def undefine(self): del self.parent.filters[self.name] tree = etree.fromstring(xml) name = tree.get('name') u = tree.find('uuid') if u is None: u = uuid.uuid4().hex else: u = u.text if name not in self.filters: self.filters[name] = FakeNWFilterInternal(self, name, u, xml) else: if self.filters[name].uuid != u: raise fakelibvirt.libvirtError( "Mismatching name '%s' with uuid '%s' vs '%s'" % (name, self.filters[name].uuid, u)) self.filters[name].xml = xml return True class IptablesFirewallTestCase(test.NoDBTestCase): def setUp(self): super(IptablesFirewallTestCase, self).setUp() self.useFixture(lock_fixture.ExternalLockFixture()) self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.fw = firewall.IptablesFirewallDriver( host=host.Host("qemu:///system")) in_rules = [ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', '*nat', ':PREROUTING ACCEPT [1170:189210]', ':INPUT ACCEPT [844:71028]', ':OUTPUT ACCEPT [5149:405186]', ':POSTROUTING ACCEPT [5063:386098]', '# Completed on Tue Dec 18 15:50:25 2012', '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;', '*mangle', ':PREROUTING ACCEPT [241:39722]', ':INPUT ACCEPT [230:39282]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [266:26558]', ':POSTROUTING ACCEPT [267:26590]', '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM ' '--checksum-fill', 'COMMIT', '# Completed on Tue Dec 18 15:50:25 2012', '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', '*filter', ':INPUT ACCEPT [969615:281627771]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [915599:63811649]', ':nova-block-ipv4 - [0:0]', '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' ',ESTABLISHED -j ACCEPT ', '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', '[0:0] -A FORWARD -o virbr0 -j REJECT ' '--reject-with icmp-port-unreachable ', '[0:0] -A FORWARD -i virbr0 
-j REJECT ' '--reject-with icmp-port-unreachable ', 'COMMIT', '# Completed on Mon Dec 6 11:54:13 2010', ] in6_filter_rules = [ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', '*filter', ':INPUT ACCEPT [349155:75810423]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [349256:75777230]', 'COMMIT', '# Completed on Tue Jan 18 23:47:56 2011', ] def _create_instance_ref(self, uuid="74526555-9166-4893-a203-126bdcab0d67"): inst = objects.Instance( id=7, uuid=uuid, user_id="fake", project_id="fake", image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6', instance_type_id=1) inst.info_cache = objects.InstanceInfoCache() inst.info_cache.deleted = False return inst @mock.patch.object(objects.InstanceList, "get_by_security_group_id") @mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance") def test_static_filters(self, mock_secrule, mock_instlist): UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146" SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a" instance_ref = self._create_instance_ref(UUID) src_instance_ref = self._create_instance_ref(SRC_UUID) secgroup = objects.SecurityGroup(id=1, user_id='fake', project_id='fake', name='testgroup', description='test group') src_secgroup = objects.SecurityGroup(id=2, user_id='fake', project_id='fake', name='testsourcegroup', description='src group') r1 = objects.SecurityGroupRule(parent_group_id=secgroup['id'], protocol='icmp', from_port=-1, to_port=-1, cidr='192.168.11.0/24', grantee_group=None) r2 = objects.SecurityGroupRule(parent_group_id=secgroup['id'], protocol='icmp', from_port=8, to_port=-1, cidr='192.168.11.0/24', grantee_group=None) r3 = objects.SecurityGroupRule(parent_group_id=secgroup['id'], protocol='tcp', from_port=80, to_port=81, cidr='192.168.10.0/24', grantee_group=None) r4 = objects.SecurityGroupRule(parent_group_id=secgroup['id'], protocol='tcp', from_port=80, to_port=81, cidr=None, grantee_group=src_secgroup, group_id=src_secgroup['id']) r5 = objects.SecurityGroupRule(parent_group_id=secgroup['id'], protocol=None, cidr=None, grantee_group=src_secgroup, group_id=src_secgroup['id']) secgroup_list = objects.SecurityGroupList() secgroup_list.objects.append(secgroup) src_secgroup_list = objects.SecurityGroupList() src_secgroup_list.objects.append(src_secgroup) instance_ref.security_groups = secgroup_list src_instance_ref.security_groups = src_secgroup_list mock_secrule.return_value = objects.SecurityGroupRuleList( objects=[r1, r2, r3, r4, r5]) def _fake_instlist(ctxt, id): if id == src_secgroup['id']: insts = objects.InstanceList() insts.objects.append(src_instance_ref) return insts else: insts = objects.InstanceList() insts.objects.append(instance_ref) return insts mock_instlist.side_effect = _fake_instlist def fake_iptables_execute(*cmd, **kwargs): process_input = kwargs.get('process_input', None) if cmd == ('ip6tables-save', '-c'): return '\n'.join(self.in6_filter_rules), None if cmd == ('iptables-save', '-c'): return '\n'.join(self.in_rules), None if cmd == ('iptables-restore', '-c'): lines = process_input.split('\n') if '*filter' in lines: self.out_rules = lines return '', '' if cmd == ('ip6tables-restore', '-c',): lines = process_input.split('\n') if '*filter' in lines: self.out6_rules = lines return '', '' network_model = _fake_network_info(self, 1) linux_net.iptables_manager.execute = fake_iptables_execute self.stubs.Set(compute_utils, 'get_nw_info_for_instance', lambda instance: network_model) self.fw.prepare_instance_filter(instance_ref, network_model) self.fw.apply_instance_filter(instance_ref, network_model) in_rules = 
filter(lambda l: not l.startswith('#'), self.in_rules) for rule in in_rules: if 'nova' not in rule: self.assertIn(rule, self.out_rules, 'Rule went missing: %s' % rule) instance_chain = None for rule in self.out_rules: # This is pretty crude, but it'll do for now # last two octets change if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule): instance_chain = rule.split(' ')[-1] break self.assertTrue(instance_chain, "The instance chain wasn't added") security_group_chain = None for rule in self.out_rules: # This is pretty crude, but it'll do for now if '-A %s -j' % instance_chain in rule: security_group_chain = rule.split(' ')[-1] break self.assertTrue(security_group_chain, "The security group chain wasn't added") regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp ' '-s 192.168.11.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP acceptance rule wasn't added") regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp ' '--icmp-type 8 -s 192.168.11.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP Echo Request acceptance rule wasn't added") for ip in network_model.fixed_ips(): if ip['version'] != 4: continue regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport ' '--dports 80:81 -s %s' % ip['address']) self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "TCP port 80/81 acceptance rule wasn't added") regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s ' '%s' % ip['address']) self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "Protocol/port-less acceptance rule wasn't added") regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp ' '-m multiport --dports 80:81 -s 192.168.10.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "TCP port 80/81 acceptance rule wasn't added") def test_filters_for_instance_with_ip_v6(self): self.flags(use_ipv6=True) network_info = _fake_network_info(self, 1) rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) self.assertEqual(len(rulesv4), 2) self.assertEqual(len(rulesv6), 1) def test_filters_for_instance_without_ip_v6(self): self.flags(use_ipv6=False) network_info = _fake_network_info(self, 1) rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) self.assertEqual(len(rulesv4), 2) self.assertEqual(len(rulesv6), 0) @mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance") def test_multinic_iptables(self, mock_secrule): mock_secrule.return_value = objects.SecurityGroupRuleList() ipv4_rules_per_addr = 1 ipv4_addr_per_network = 2 ipv6_rules_per_addr = 1 ipv6_addr_per_network = 1 networks_count = 5 instance_ref = self._create_instance_ref() instance_ref.security_groups = objects.SecurityGroupList() network_info = _fake_network_info(self, networks_count, ipv4_addr_per_network) network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \ '1.1.1.1' ipv4_len = len(self.fw.iptables.ipv4['filter'].rules) ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, network_info) self.fw.prepare_instance_filter(instance_ref, network_info) ipv4 = self.fw.iptables.ipv4['filter'].rules ipv6 = self.fw.iptables.ipv6['filter'].rules ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len # Extra rules are for the DHCP request rules = (ipv4_rules_per_addr * ipv4_addr_per_network * networks_count) + 2 self.assertEqual(ipv4_network_rules, rules) self.assertEqual(ipv6_network_rules, ipv6_rules_per_addr * 
ipv6_addr_per_network * networks_count) def test_do_refresh_security_group_rules(self): instance_ref = self._create_instance_ref() self.mox.StubOutWithMock(self.fw, 'instance_rules') self.mox.StubOutWithMock(self.fw, 'add_filters_for_instance', use_mock_anything=True) self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'], 'has_chain') self.fw.instance_rules(instance_ref, mox.IgnoreArg()).AndReturn((None, None)) self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.fw.instance_rules(instance_ref, mox.IgnoreArg()).AndReturn((None, None)) self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg() ).AndReturn(True) self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg()) self.fw.instance_info[instance_ref['id']] = (instance_ref, None) self.fw.do_refresh_security_group_rules("fake") def test_do_refresh_security_group_rules_instance_gone(self): instance1 = objects.Instance(None, id=1, uuid='fake-uuid1') instance2 = objects.Instance(None, id=2, uuid='fake-uuid2') self.fw.instance_info = {1: (instance1, 'netinfo1'), 2: (instance2, 'netinfo2')} mock_filter = mock.MagicMock() with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}): mock_filter.has_chain.return_value = False with mock.patch.object(self.fw, 'instance_rules') as mock_ir: mock_ir.return_value = (None, None) self.fw.do_refresh_security_group_rules('secgroup') self.assertEqual(2, mock_ir.call_count) # NOTE(danms): Make sure that it is checking has_chain each time, # continuing to process all the instances, and never adding the # new chains back if has_chain() is False mock_filter.has_chain.assert_has_calls([mock.call('inst-1'), mock.call('inst-2')], any_order=True) self.assertEqual(0, mock_filter.add_chain.call_count) @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName") @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML") @mock.patch.object(objects.InstanceList, "get_by_security_group_id") @mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance") def test_unfilter_instance_undefines_nwfilter(self, mock_secrule, mock_instlist, mock_define, mock_lookup): fakefilter = NWFilterFakes() mock_lookup.side_effect = fakefilter.nwfilterLookupByName mock_define.side_effect = fakefilter.filterDefineXMLMock instance_ref = self._create_instance_ref() instance_ref.security_groups = objects.SecurityGroupList() mock_secrule.return_value = objects.SecurityGroupRuleList() network_info = _fake_network_info(self, 1) self.fw.setup_basic_filtering(instance_ref, network_info) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) original_filter_count = len(fakefilter.filters) self.fw.unfilter_instance(instance_ref, network_info) # should undefine just the instance filter self.assertEqual(original_filter_count - len(fakefilter.filters), 1) @mock.patch.object(firewall, 'libvirt', fakelibvirt) class NWFilterTestCase(test.NoDBTestCase): def setUp(self): super(NWFilterTestCase, self).setUp() self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.fw = firewall.NWFilterFirewall(host=host.Host("qemu:///system")) def _create_security_group(self, instance_ref): secgroup = objects.SecurityGroup(id=1, user_id='fake', project_id='fake', name='testgroup', description='test group description') secgroup_list = objects.SecurityGroupList() secgroup_list.objects.append(secgroup) 
instance_ref.security_groups = secgroup_list return secgroup def _create_instance(self): inst = objects.Instance( id=7, uuid="74526555-9166-4893-a203-126bdcab0d67", user_id="fake", project_id="fake", image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6', instance_type_id=1) inst.info_cache = objects.InstanceInfoCache() inst.info_cache.deleted = False return inst @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML") def test_creates_base_rule_first(self, mock_define): # These come pre-defined by libvirt self.defined_filters = ['no-mac-spoofing', 'no-ip-spoofing', 'no-arp-spoofing', 'allow-dhcp-server'] self.recursive_depends = {} for f in self.defined_filters: self.recursive_depends[f] = [] def fake_define(xml): dom = minidom.parseString(xml) name = dom.firstChild.getAttribute('name') self.recursive_depends[name] = [] for f in dom.getElementsByTagName('filterref'): ref = f.getAttribute('filter') self.assertIn(ref, self.defined_filters, ('%s referenced filter that does ' + 'not yet exist: %s') % (name, ref)) dependencies = [ref] + self.recursive_depends[ref] self.recursive_depends[name] += dependencies self.defined_filters.append(name) return True mock_define.side_effect = fake_define instance_ref = self._create_instance() self._create_security_group(instance_ref) def _ensure_all_called(mac, allow_dhcp): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], mac.translate({ord(':'): None})) requiredlist = ['no-arp-spoofing', 'no-ip-spoofing', 'no-mac-spoofing'] required_not_list = [] if allow_dhcp: requiredlist.append('allow-dhcp-server') else: required_not_list.append('allow-dhcp-server') for required in requiredlist: self.assertIn(required, self.recursive_depends[instance_filter], "Instance's filter does not include %s" % required) for required_not in required_not_list: self.assertNotIn(required_not, self.recursive_depends[instance_filter], "Instance filter includes %s" % required_not) network_info = _fake_network_info(self, 1) # since there is one (network_info) there is one vif # pass this vif's mac to _ensure_all_called() # to set the instance_filter properly mac = network_info[0]['address'] network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \ '1.1.1.1' self.fw.setup_basic_filtering(instance_ref, network_info) allow_dhcp = True _ensure_all_called(mac, allow_dhcp) network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None self.fw.setup_basic_filtering(instance_ref, network_info) allow_dhcp = False _ensure_all_called(mac, allow_dhcp) @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName") @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML") def test_unfilter_instance_undefines_nwfilters(self, mock_define, mock_lookup): fakefilter = NWFilterFakes() mock_lookup.side_effect = fakefilter.nwfilterLookupByName mock_define.side_effect = fakefilter.filterDefineXMLMock instance_ref = self._create_instance() self._create_security_group(instance_ref) network_info = _fake_network_info(self, 1) self.fw.setup_basic_filtering(instance_ref, network_info) original_filter_count = len(fakefilter.filters) self.fw.unfilter_instance(instance_ref, network_info) self.assertEqual(original_filter_count - len(fakefilter.filters), 1) @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName") @mock.patch.object(greenthread, 'sleep') def test_unfilter_instance_retry_and_error(self, mock_sleep, mock_lookup): # Tests that we try to undefine the network filter when it's in use # until we hit a timeout. We try two times and sleep once in between. 
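# live_migration_retry_count bounds the undefine attempts, and greenthread.sleep is mocked out by the decorator above, so the retry loop is exercised without any real waiting.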
self.flags(live_migration_retry_count=2) in_use = fakelibvirt.libvirtError('nwfilter is in use') in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,) mock_undefine = mock.Mock(side_effect=in_use) fakefilter = mock.MagicMock(undefine=mock_undefine) mock_lookup.return_value = fakefilter instance_ref = self._create_instance() network_info = _fake_network_info(self, 1) self.assertRaises(fakelibvirt.libvirtError, self.fw.unfilter_instance, instance_ref, network_info) self.assertEqual(2, mock_lookup.call_count) self.assertEqual(2, mock_undefine.call_count) mock_sleep.assert_called_once_with(1) @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName") @mock.patch.object(greenthread, 'sleep') def test_unfilter_instance_retry_not_found(self, mock_sleep, mock_lookup): # Tests that we exit if the nw filter is not found. in_use = fakelibvirt.libvirtError('nwfilter is in use') in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,) not_found = fakelibvirt.libvirtError('no nwfilter with matching name') not_found.err = (fakelibvirt.VIR_ERR_NO_NWFILTER,) mock_undefine = mock.Mock(side_effect=(in_use, not_found)) fakefilter = mock.MagicMock(undefine=mock_undefine) mock_lookup.return_value = fakefilter instance_ref = self._create_instance() network_info = _fake_network_info(self, 1) self.fw.unfilter_instance(instance_ref, network_info) self.assertEqual(2, mock_lookup.call_count) self.assertEqual(2, mock_undefine.call_count) mock_sleep.assert_called_once_with(1) @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName") @mock.patch.object(greenthread, 'sleep') def test_unfilter_instance_retry_and_pass(self, mock_sleep, mock_lookup): # Tests that we retry on in-use error but pass if undefine() works # while looping. in_use = fakelibvirt.libvirtError('nwfilter is in use') in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,) mock_undefine = mock.Mock(side_effect=(in_use, None)) fakefilter = mock.MagicMock(undefine=mock_undefine) mock_lookup.return_value = fakefilter instance_ref = self._create_instance() network_info = _fake_network_info(self, 1) self.fw.unfilter_instance(instance_ref, network_info) self.assertEqual(2, mock_lookup.call_count) self.assertEqual(2, mock_undefine.call_count) mock_sleep.assert_called_once_with(1) def test_redefining_nwfilters(self): fakefilter = NWFilterFakes() self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName instance_ref = self._create_instance() self._create_security_group(instance_ref) network_info = _fake_network_info(self, 1) self.fw.setup_basic_filtering(instance_ref, network_info) self.fw.setup_basic_filtering(instance_ref, network_info) @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName") @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML") def test_nwfilter_parameters(self, mock_define, mock_lookup): fakefilter = NWFilterFakes() mock_lookup.side_effect = fakefilter.nwfilterLookupByName mock_define.side_effect = fakefilter.filterDefineXMLMock instance_ref = self._create_instance() self._create_security_group(instance_ref) network_info = _fake_network_info(self, 1) self.fw.setup_basic_filtering(instance_ref, network_info) vif = network_info[0] nic_id = vif['address'].replace(':', '') instance_filter_name = self.fw._instance_filter_name(instance_ref, nic_id) f = fakefilter.nwfilterLookupByName(instance_filter_name) tree = etree.fromstring(f.xml) for fref in tree.findall('filterref'): parameters = fref.findall('./parameter') for parameter in parameters: 
subnet_v4, subnet_v6 = vif['network']['subnets'] if parameter.get('name') == 'IP': self.assertTrue(_ipv4_like(parameter.get('value'), '192.168')) elif parameter.get('name') == 'DHCPSERVER': dhcp_server = subnet_v4.get('dhcp_server') self.assertEqual(parameter.get('value'), dhcp_server) elif parameter.get('name') == 'RASERVER': ra_server = subnet_v6['gateway']['address'] + "/128" self.assertEqual(parameter.get('value'), ra_server) elif parameter.get('name') == 'PROJNET': ipv4_cidr = subnet_v4['cidr'] net, mask = netutils.get_net_and_mask(ipv4_cidr) self.assertEqual(parameter.get('value'), net) elif parameter.get('name') == 'PROJMASK': ipv4_cidr = subnet_v4['cidr'] net, mask = netutils.get_net_and_mask(ipv4_cidr) self.assertEqual(parameter.get('value'), mask) elif parameter.get('name') == 'PROJNET6': ipv6_cidr = subnet_v6['cidr'] net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr) self.assertEqual(parameter.get('value'), net) elif parameter.get('name') == 'PROJMASK6': ipv6_cidr = subnet_v6['cidr'] net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr) self.assertEqual(parameter.get('value'), prefix) else: raise exception.InvalidParameterValue('unknown parameter ' 'in filter') @mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName") @mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML") def test_multinic_base_filter_selection(self, mock_define, mock_lookup): fakefilter = NWFilterFakes() mock_lookup.side_effect = fakefilter.nwfilterLookupByName mock_define.side_effect = fakefilter.filterDefineXMLMock instance_ref = self._create_instance() self._create_security_group(instance_ref) network_info = _fake_network_info(self, 2) network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \ '1.1.1.1' self.fw.setup_basic_filtering(instance_ref, network_info) def assert_filterref(instance, vif, expected=None): expected = expected or [] nic_id = vif['address'].replace(':', '') filter_name = self.fw._instance_filter_name(instance, nic_id) f = fakefilter.nwfilterLookupByName(filter_name) tree = etree.fromstring(f.xml) frefs = [fr.get('filter') for fr in tree.findall('filterref')] self.assertEqual(set(expected), set(frefs)) assert_filterref(instance_ref, network_info[0], expected=['nova-base']) assert_filterref(instance_ref, network_info[1], expected=['nova-nodhcp']) @mock.patch.object(firewall.LOG, 'debug') def test_get_filter_uuid_unicode_exception_logging(self, debug): with mock.patch.object(self.fw._conn, 'nwfilterLookupByName') as look: look.side_effect = fakelibvirt.libvirtError(u"\U0001F4A9") self.fw._get_filter_uuid('test') self.assertEqual(2, debug.call_count) self.assertEqual(u"Cannot find UUID for filter '%(name)s': '%(e)s'", debug.call_args_list[0][0][0]) nova-13.1.4/nova/tests/unit/virt/libvirt/test_blockinfo.py0000664000567000056710000014752013064447152025025 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
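# Unit tests for nova.virt.libvirt.blockinfo: disk bus selection, default
# device name assignment, and the guest disk mappings built for the root
# disk, ephemeral disks, swap, config drive and attached volumes.
#
# For orientation, the simplest mapping asserted below (an illustrative
# copy of the expectation in test_get_disk_mapping_simple) has the shape:
#
#     {'disk':       {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
#                     'boot_index': '1'},
#      'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
#      'root':       {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
#                     'boot_index': '1'}}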
import copy import fixtures import mock from nova import block_device from nova.compute import arch from nova import context from nova import exception from nova import objects from nova import test from nova.tests.unit import fake_block_device import nova.tests.unit.image.fake from nova.tests.unit.virt import fakelibosinfo from nova.virt import block_device as driver_block_device from nova.virt import driver from nova.virt.libvirt import blockinfo class LibvirtBlockInfoTest(test.NoDBTestCase): def setUp(self): super(LibvirtBlockInfoTest, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = context.get_admin_context() nova.tests.unit.image.fake.stub_out_image_service(self) self.test_instance = { 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310', 'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'root_gb': 10, 'ephemeral_gb': 20, 'instance_type_id': 2, # m1.tiny 'config_drive': None, 'system_metadata': {}, } self.test_image_meta = { 'disk_format': 'raw', } flavor = objects.Flavor(memory_mb=128, root_gb=0, name='m1.micro', ephemeral_gb=0, vcpus=1, swap=0, rxtx_factor=1.0, flavorid='1', vcpu_weight=None, id=2) self.test_instance['flavor'] = flavor self.test_instance['old_flavor'] = None self.test_instance['new_flavor'] = None def test_volume_in_mapping(self): swap = {'device_name': '/dev/sdb', 'swap_size': 1} ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4', 'device_name': '/dev/sdc1', 'size': 10}, {'disk_bus': 'ide', 'guest_format': None, 'device_name': '/dev/sdd', 'size': 10}] block_device_mapping = [{'mount_device': '/dev/sde', 'device_path': 'fake_device'}, {'mount_device': '/dev/sdf', 'device_path': 'fake_device'}] block_device_info = { 'root_device_name': '/dev/sda', 'swap': swap, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} def _assert_volume_in_mapping(device_name, true_or_false): self.assertEqual( true_or_false, block_device.volume_in_mapping(device_name, block_device_info)) _assert_volume_in_mapping('sda', False) _assert_volume_in_mapping('sdb', True) _assert_volume_in_mapping('sdc1', True) _assert_volume_in_mapping('sdd', True) _assert_volume_in_mapping('sde', True) _assert_volume_in_mapping('sdf', True) _assert_volume_in_mapping('sdg', False) _assert_volume_in_mapping('sdh1', False) def test_find_disk_dev(self): mapping = { "disk.local": { 'dev': 'sda', 'bus': 'scsi', 'type': 'disk', }, "disk.swap": { 'dev': 'sdc', 'bus': 'scsi', 'type': 'disk', }, } dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi') self.assertEqual('sdb', dev) dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi', last_device=True) self.assertEqual('sdz', dev) dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio') self.assertEqual('vda', dev) dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'fdc') self.assertEqual('fda', dev) @mock.patch('nova.virt.libvirt.blockinfo.has_disk_dev', return_value=True) def test_find_disk_dev_for_disk_bus_no_free_error(self, has_disk_dev_mock): # Tests that an exception is raised when all devices for a given prefix # are already reserved. 
mapping = { 'disk': { 'bus': 'ide', 'dev': 'hda', 'type': 'cdrom', 'boot_index': '1', } } self.assertRaises(exception.NovaException, blockinfo.find_disk_dev_for_disk_bus, mapping, 'ide') def test_get_next_disk_dev(self): mapping = {} mapping['disk.local'] = blockinfo.get_next_disk_info(mapping, 'virtio') self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, mapping['disk.local']) mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping, 'virtio') self.assertEqual({'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'}, mapping['disk.swap']) mapping['disk.config'] = blockinfo.get_next_disk_info(mapping, 'ide', 'cdrom', True) self.assertEqual({'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'}, mapping['disk.config']) def test_get_next_disk_dev_boot_index(self): info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=-1) self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, info) info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=2) self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk', 'boot_index': '2'}, info) def test_get_disk_mapping_simple(self): # The simplest possible disk mapping setup, all defaults instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) with mock.patch.object(instance_ref, 'get_flavor', return_value=instance_ref.flavor) as get_flavor: mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta) # Since there was no block_device_info passed to get_disk_mapping we # expect to get the swap info from the flavor in the instance. get_flavor.assert_called_once_with() expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'} } self.assertEqual(expect, mapping) def test_get_disk_mapping_simple_rootdev(self): # A simple disk mapping setup, but with custom root device name instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'root_device_name': '/dev/sda' } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) expect = { 'disk': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}, 'root': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk', 'boot_index': '1'} } self.assertEqual(expect, mapping) def test_get_disk_mapping_rescue(self): # A simple disk mapping setup, but in rescue mode instance_ref = objects.Instance() image_meta = objects.ImageMeta.from_dict(self.test_image_meta) mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, rescue=True) expect = { 'disk.rescue': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_lxc(self): # A simple disk mapping setup, but for lxc self.test_instance['ephemeral_gb'] = 0 instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) mapping = blockinfo.get_disk_mapping("lxc", instance_ref, "lxc", "lxc", image_meta) expect = { 'disk': {'bus': 'lxc', 'dev': None, 'type': 'disk', 'boot_index': '1'}, 'root': {'bus': 'lxc', 'dev': None, 'type': 'disk', 
'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_simple_iso(self): # A simple disk mapping setup, but with a ISO for root device instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({'disk_format': 'iso'}) mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta) expect = { 'disk': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}, 'root': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_simple_swap(self): # A simple disk mapping setup, but with a swap device added instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.swap = 5 image_meta = objects.ImageMeta.from_dict(self.test_image_meta) mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta) expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_volumes_swap(self): # A disk mapping setup with volumes attached, then a swap device added instance_ref = objects.Instance(**self.test_instance) instance_ref.root_device_name = '/dev/vda' instance_ref.ephemeral_gb = 0 block_dev_info = {'swap': None, 'root_device_name': u'/dev/vda', 'ephemerals': [], 'block_device_mapping': [{'boot_index': None, 'mount_device': u'/dev/vdb', 'connection_info': {}, 'disk_bus': None, 'device_type': None}, {'boot_index': 0, 'mount_device': u'/dev/vda', 'connection_info': {}, 'disk_bus': u'virtio', 'device_type': u'disk'}]} instance_ref.flavor.swap = 5 image_meta = {} mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info=block_dev_info) expect = { '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_simple_configdrive(self): # A simple disk mapping setup, but with configdrive added # It's necessary to check if the architecture is power, because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi self.flags(force_config_drive=True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta) # The last device is selected for this. on x86 is the last ide # device (hdd). 
Since power only support scsi, the last device # is sdz bus_ppc = ("scsi", "sdz") expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc} bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}), ("ide", "hdd")) expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'} } self.assertEqual(expect, mapping) def test_get_disk_mapping_cdrom_configdrive(self): # A simple disk mapping setup, with configdrive added as cdrom # It's necessary to check if the architecture is power, because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi self.flags(force_config_drive=True) self.flags(config_drive_format='iso9660') instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta) bus_ppc = ("scsi", "sdz") expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc} bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}), ("ide", "hdd")) expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'} } self.assertEqual(expect, mapping) def test_get_disk_mapping_disk_configdrive(self): # A simple disk mapping setup, with configdrive added as disk self.flags(force_config_drive=True) self.flags(config_drive_format='vfat') instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta) expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_ephemeral(self): # A disk mapping with ephemeral devices instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.swap = 5 image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'ephemerals': [ {'device_type': 'disk', 'guest_format': 'ext4', 'device_name': '/dev/vdb', 'size': 10}, {'disk_bus': 'ide', 'guest_format': None, 'device_name': '/dev/vdc', 'size': 10}, {'device_type': 'floppy', 'device_name': '/dev/vdd', 'size': 10}, ] } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk', 'format': 'ext4'}, 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'}, 'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'}, 'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_custom_swap(self): # A disk mapping with a swap device at position vdb. 
This # should cause disk.local to be removed instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'swap': {'device_name': '/dev/vdb', 'swap_size': 10}, } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_blockdev_root(self): # A disk mapping with a blockdev replacing the default root instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'block_device_mapping': [ {'connection_info': "fake", 'mount_device': "/dev/vda", 'boot_index': 0, 'device_type': 'disk', 'delete_on_termination': True}, ] } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) expect = { '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_blockdev_root_on_spawn(self): # A disk mapping with a blockdev initializing the default root instance_ref = objects.Instance(**self.test_instance) image_meta = {} block_device_info = { 'block_device_mapping': [ {'connection_info': None, 'mount_device': None, 'boot_index': 0, 'device_type': None, 'delete_on_termination': True}, ] } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) expect = { '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_blockdev_eph(self): # A disk mapping with a blockdev replacing the ephemeral device instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'block_device_mapping': [ {'connection_info': "fake", 'mount_device': "/dev/vdb", 'boot_index': -1, 'delete_on_termination': True}, ] } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_blockdev_many(self): # A disk mapping with a blockdev replacing all devices instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'block_device_mapping': [ {'connection_info': "fake", 'mount_device': "/dev/vda", 'boot_index': 0, 'disk_bus': 'scsi', 'delete_on_termination': True}, {'connection_info': "fake", 'mount_device': "/dev/vdb", 'boot_index': -1, 'delete_on_termination': True}, {'connection_info': "fake", 'mount_device': "/dev/vdc", 'boot_index': -1, 'device_type': 'cdrom', 'delete_on_termination': True}, ] } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, 
"virtio", "ide", image_meta, block_device_info) expect = { '/dev/vda': {'bus': 'scsi', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom'}, 'root': {'bus': 'scsi', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_complex(self): # The strangest possible disk mapping setup instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'root_device_name': '/dev/vdf', 'swap': {'device_name': '/dev/vdy', 'swap_size': 10}, 'ephemerals': [ {'device_type': 'disk', 'guest_format': 'ext4', 'device_name': '/dev/vdb', 'size': 10}, {'disk_bus': 'ide', 'guest_format': None, 'device_name': '/dev/vdc', 'size': 10}, ], 'block_device_mapping': [ {'connection_info': "fake", 'mount_device': "/dev/vda", 'boot_index': 1, 'delete_on_termination': True}, ] } mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) expect = { 'disk': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk', 'boot_index': '1'}, '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '2'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk', 'format': 'ext4'}, 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'}, 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk', 'boot_index': '1'}, } self.assertEqual(expect, mapping) def test_get_disk_mapping_updates_original(self): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = { 'root_device_name': '/dev/vda', 'swap': {'device_name': '/dev/vdb', 'device_type': 'really_lame_type', 'swap_size': 10}, 'ephemerals': [{'disk_bus': 'no_such_bus', 'device_type': 'yeah_right', 'device_name': '/dev/vdc', 'size': 10}], 'block_device_mapping': [ {'connection_info': "fake", 'mount_device': None, 'device_type': 'lawnmower', 'delete_on_termination': True}] } expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio', 'device_type': 'disk', 'swap_size': 10} expected_ephemeral = {'disk_bus': 'virtio', 'device_type': 'disk', 'device_name': '/dev/vdc', 'size': 10} expected_bdm = {'connection_info': "fake", 'mount_device': '/dev/vdd', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True} with mock.patch.object(instance_ref, 'get_flavor') as get_flavor_mock: blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide", image_meta, block_device_info) # we should have gotten the swap info from block_device_info rather # than the flavor information on the instance self.assertFalse(get_flavor_mock.called) self.assertEqual(expected_swap, block_device_info['swap']) self.assertEqual(expected_ephemeral, block_device_info['ephemerals'][0]) self.assertEqual(expected_bdm, block_device_info['block_device_mapping'][0]) def test_get_disk_bus(self): instance = objects.Instance(**self.test_instance) expected = ( (arch.X86_64, 'disk', 'virtio'), (arch.X86_64, 'cdrom', 'ide'), (arch.X86_64, 'floppy', 'fdc'), (arch.PPC, 'disk', 'virtio'), (arch.PPC, 'cdrom', 'scsi'), (arch.PPC64, 'disk', 'virtio'), (arch.PPC64, 'cdrom', 'scsi'), (arch.PPCLE, 'disk', 'virtio'), (arch.PPCLE, 'cdrom', 'scsi'), (arch.PPC64LE, 'disk', 'virtio'), (arch.PPC64LE, 'cdrom', 'scsi'), (arch.S390, 'disk', 'virtio'), (arch.S390, 'cdrom', 'scsi'), (arch.S390X, 
'disk', 'virtio'), (arch.S390X, 'cdrom', 'scsi') ) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) for guestarch, dev, res in expected: with mock.patch.object(blockinfo.libvirt_utils, 'get_arch', return_value=guestarch): bus = blockinfo.get_disk_bus_for_device_type( instance, 'kvm', image_meta, dev) self.assertEqual(res, bus) expected = ( ('scsi', None, 'disk', 'scsi'), (None, 'scsi', 'cdrom', 'scsi'), ('usb', None, 'disk', 'usb') ) for dbus, cbus, dev, res in expected: props = {} if dbus is not None: props['hw_disk_bus'] = dbus if cbus is not None: props['hw_cdrom_bus'] = cbus image_meta = objects.ImageMeta.from_dict( {'properties': props}) bus = blockinfo.get_disk_bus_for_device_type( instance, 'kvm', image_meta, device_type=dev) self.assertEqual(res, bus) image_meta = objects.ImageMeta.from_dict( {'properties': {'hw_disk_bus': 'xen'}}) self.assertRaises(exception.UnsupportedHardware, blockinfo.get_disk_bus_for_device_type, instance, 'kvm', image_meta) def test_get_disk_bus_with_osinfo(self): self.useFixture(fixtures.MonkeyPatch( 'nova.virt.osinfo.libosinfo', fakelibosinfo)) instance = objects.Instance(**self.test_instance) image_meta = {'properties': {'os_name': 'fedora22'}} image_meta = objects.ImageMeta.from_dict(image_meta) bus = blockinfo.get_disk_bus_for_device_type(instance, 'kvm', image_meta) self.assertEqual('virtio', bus) def test_success_get_disk_bus_for_disk_dev(self): expected = ( ('ide', ("kvm", "hda")), ('scsi', ("kvm", "sdf")), ('virtio', ("kvm", "vds")), ('fdc', ("kvm", "fdc")), ('uml', ("kvm", "ubd")), ('xen', ("xen", "sdf")), ('xen', ("xen", "xvdb")) ) for res, args in expected: self.assertEqual(res, blockinfo.get_disk_bus_for_disk_dev(*args)) def test_fail_get_disk_bus_for_disk_dev_unsupported_virt_type(self): instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.assertRaises(exception.UnsupportedVirtType, blockinfo.get_disk_bus_for_device_type, instance, 'kvm1', image_meta) def test_fail_get_disk_bus_for_disk_dev(self): self.assertRaises(exception.NovaException, blockinfo.get_disk_bus_for_disk_dev, 'inv', 'val') def test_get_config_drive_type_default(self): config_drive_type = blockinfo.get_config_drive_type() self.assertEqual('cdrom', config_drive_type) def test_get_config_drive_type_cdrom(self): self.flags(config_drive_format='iso9660') config_drive_type = blockinfo.get_config_drive_type() self.assertEqual('cdrom', config_drive_type) def test_get_config_drive_type_disk(self): self.flags(config_drive_format='vfat') config_drive_type = blockinfo.get_config_drive_type() self.assertEqual('disk', config_drive_type) def test_get_info_from_bdm(self): instance = objects.Instance(**self.test_instance) bdms = [{'device_name': '/dev/vds', 'device_type': 'disk', 'disk_bus': 'usb', 'swap_size': 4}, {'device_type': 'disk', 'guest_format': 'ext4', 'device_name': '/dev/vdb', 'size': 2}, {'disk_bus': 'ide', 'guest_format': None, 'device_name': '/dev/vdc', 'size': 3}, {'connection_info': "fake", 'mount_device': "/dev/sdr", 'disk_bus': 'lame_bus', 'device_type': 'cdrom', 'boot_index': 0, 'delete_on_termination': True}, {'connection_info': "fake", 'mount_device': "/dev/vdo", 'disk_bus': 'scsi', 'boot_index': 1, 'device_type': 'lame_type', 'delete_on_termination': True}] expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'}, {'dev': 'vdb', 'type': 'disk', 'bus': 'virtio', 'format': 'ext4'}, {'dev': 'vdc', 'type': 'disk', 'bus': 'ide'}, {'dev': 'sdr', 'type': 'cdrom', 'bus': 'scsi', 'boot_index': '1'}, 
{'dev': 'vdo', 'type': 'disk', 'bus': 'scsi', 'boot_index': '2'}] image_meta = objects.ImageMeta.from_dict(self.test_image_meta) for bdm, expected in zip(bdms, expected): self.assertEqual(expected, blockinfo.get_info_from_bdm(instance, 'kvm', image_meta, bdm)) # Test that passed bus and type are considered bdm = {'device_name': '/dev/vda'} expected = {'dev': 'vda', 'type': 'disk', 'bus': 'ide'} self.assertEqual( expected, blockinfo.get_info_from_bdm(instance, 'kvm', image_meta, bdm, disk_bus='ide', dev_type='disk')) # Test that lame bus values are defaulted properly bdm = {'disk_bus': 'lame_bus', 'device_type': 'cdrom'} with mock.patch.object(blockinfo, 'get_disk_bus_for_device_type', return_value='ide') as get_bus: blockinfo.get_info_from_bdm(instance, 'kvm', image_meta, bdm) get_bus.assert_called_once_with(instance, 'kvm', image_meta, 'cdrom') # Test that missing device is defaulted as expected bdm = {'disk_bus': 'ide', 'device_type': 'cdrom'} expected = {'dev': 'vdd', 'type': 'cdrom', 'bus': 'ide'} mapping = {'root': {'dev': 'vda'}} with mock.patch.object(blockinfo, 'find_disk_dev_for_disk_bus', return_value='vdd') as find_dev: got = blockinfo.get_info_from_bdm( instance, 'kvm', image_meta, bdm, mapping, assigned_devices=['vdb', 'vdc']) find_dev.assert_called_once_with( {'root': {'dev': 'vda'}, 'vdb': {'dev': 'vdb'}, 'vdc': {'dev': 'vdc'}}, 'ide') self.assertEqual(expected, got) def test_get_device_name(self): bdm_obj = objects.BlockDeviceMapping(self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'boot_index': 0})) self.assertEqual('/dev/vda', blockinfo.get_device_name(bdm_obj)) driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj) self.assertEqual('/dev/vda', blockinfo.get_device_name(driver_bdm)) bdm_obj.device_name = None self.assertIsNone(blockinfo.get_device_name(bdm_obj)) driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj) self.assertIsNone(blockinfo.get_device_name(driver_bdm)) @mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus', return_value='vda') @mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev', return_value='virtio') def test_get_root_info_no_bdm(self, mock_get_bus, mock_find_dev): instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) blockinfo.get_root_info(instance, 'kvm', image_meta, None, 'virtio', 'ide') mock_find_dev.assert_called_once_with({}, 'virtio') blockinfo.get_root_info(instance, 'kvm', image_meta, None, 'virtio', 'ide', root_device_name='/dev/vda') mock_get_bus.assert_called_once_with('kvm', '/dev/vda') @mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus', return_value='vda') @mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev', return_value='virtio') def test_get_root_info_no_bdm_empty_image_meta(self, mock_get_bus, mock_find_dev): # The evacuate operation passes image_ref=None to the compute node for # rebuild which then defaults image_meta to {}, so we don't have any # attributes in the ImageMeta object passed to get_root_info and we # need to make sure we don't try lazy-loading anything. 
instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({}) blockinfo.get_root_info(instance, 'kvm', image_meta, None, 'virtio', 'ide') mock_find_dev.assert_called_once_with({}, 'virtio') blockinfo.get_root_info(instance, 'kvm', image_meta, None, 'virtio', 'ide', root_device_name='/dev/vda') mock_get_bus.assert_called_once_with('kvm', '/dev/vda') @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') def test_get_root_info_bdm(self, mock_get_info): instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) root_bdm = {'mount_device': '/dev/vda', 'disk_bus': 'scsi', 'device_type': 'disk'} # No root_device_name blockinfo.get_root_info(instance, 'kvm', image_meta, root_bdm, 'virtio', 'ide') mock_get_info.assert_called_once_with(instance, 'kvm', image_meta, root_bdm, {}, 'virtio') mock_get_info.reset_mock() # Both device names blockinfo.get_root_info(instance, 'kvm', image_meta, root_bdm, 'virtio', 'ide', root_device_name='sda') mock_get_info.assert_called_once_with(instance, 'kvm', image_meta, root_bdm, {}, 'virtio') mock_get_info.reset_mock() # Missing device names del root_bdm['mount_device'] blockinfo.get_root_info(instance, 'kvm', image_meta, root_bdm, 'virtio', 'ide', root_device_name='sda') mock_get_info.assert_called_once_with(instance, 'kvm', image_meta, {'device_name': 'sda', 'disk_bus': 'scsi', 'device_type': 'disk'}, {}, 'virtio') def test_get_boot_order_simple(self): disk_info = { 'disk_bus': 'virtio', 'cdrom_bus': 'ide', 'mapping': { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } } expected_order = ['hd'] self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info)) def test_get_boot_order_complex(self): disk_info = { 'disk_bus': 'virtio', 'cdrom_bus': 'ide', 'mapping': { 'disk': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk', 'boot_index': '1'}, '/dev/hda': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom', 'boot_index': '3'}, '/dev/fda': {'bus': 'fdc', 'dev': 'fda', 'type': 'floppy', 'boot_index': '2'}, 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk', 'format': 'ext4'}, 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'}, 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'}, 'root': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk', 'boot_index': '1'}, } } expected_order = ['hd', 'fd', 'cdrom'] self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info)) def test_get_boot_order_overlapping(self): disk_info = { 'disk_bus': 'virtio', 'cdrom_bus': 'ide', 'mapping': { '/dev/vda': {'bus': 'scsi', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk', 'boot_index': '2'}, '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom', 'boot_index': '3'}, 'root': {'bus': 'scsi', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, } } expected_order = ['hd', 'cdrom'] self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info)) class DefaultDeviceNamesTestCase(test.NoDBTestCase): def setUp(self): super(DefaultDeviceNamesTestCase, self).setUp() self.context = context.get_admin_context() self.instance = objects.Instance( uuid='32dfcb37-5af1-552b-357c-be8c3aa38310', memory_kb='1024000', basepath='/some/path', bridge_name='br100', vcpus=2, project_id='fake', bridge='br101', image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6', root_gb=10, ephemeral_gb=20, instance_type_id=2, config_drive=False, 
root_device_name = '/dev/vda', system_metadata={}) self.image_meta = objects.ImageMeta( disk_format='raw', properties=objects.ImageMetaProps()) self.virt_type = 'kvm' self.flavor = objects.Flavor(swap=4) self.patchers = [] self.patchers.append(mock.patch.object(self.instance, 'get_flavor', return_value=self.flavor)) self.patchers.append(mock.patch( 'nova.objects.block_device.BlockDeviceMapping.save')) for patcher in self.patchers: patcher.start() self.ephemerals = [objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': None, 'volume_size': 1, 'boot_index': -1}))] self.swap = [objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vdc', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'disk_bus': 'virtio', 'delete_on_termination': True, 'guest_format': 'swap', 'volume_size': 1, 'boot_index': -1}))] self.block_device_mapping = [ objects.BlockDeviceMapping(self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vda', 'source_type': 'volume', 'destination_type': 'volume', 'device_type': 'disk', 'disk_bus': 'virtio', 'volume_id': 'fake-volume-id-1', 'boot_index': 0})), objects.BlockDeviceMapping(self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vdd', 'source_type': 'snapshot', 'device_type': 'disk', 'disk_bus': 'virtio', 'destination_type': 'volume', 'snapshot_id': 'fake-snapshot-id-1', 'boot_index': -1})), objects.BlockDeviceMapping(self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 5, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vde', 'source_type': 'blank', 'device_type': 'disk', 'disk_bus': 'virtio', 'destination_type': 'volume', 'boot_index': -1}))] def tearDown(self): super(DefaultDeviceNamesTestCase, self).tearDown() for patcher in self.patchers: patcher.stop() def _test_default_device_names(self, eph, swap, bdm): bdms = eph + swap + bdm bdi = driver.get_block_device_info(self.instance, bdms) blockinfo.default_device_names(self.virt_type, self.context, self.instance, bdi, self.image_meta) def test_only_block_device_mapping(self): # Test no-op original_bdm = copy.deepcopy(self.block_device_mapping) self._test_default_device_names([], [], self.block_device_mapping) for original, defaulted in zip( original_bdm, self.block_device_mapping): self.assertEqual(original.device_name, defaulted.device_name) # Assert it defaults the missing one as expected self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], [], self.block_device_mapping) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vde', self.block_device_mapping[2]['device_name']) def test_with_ephemerals(self): # Test ephemeral gets assigned self.ephemerals[0]['device_name'] = None self._test_default_device_names(self.ephemerals, [], self.block_device_mapping) self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name']) self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, [], 
self.block_device_mapping) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vde', self.block_device_mapping[2]['device_name']) def test_with_swap(self): # Test swap only self.swap[0]['device_name'] = None self._test_default_device_names([], self.swap, []) self.assertEqual('/dev/vdc', self.swap[0]['device_name']) # Test swap and block_device_mapping self.swap[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], self.swap, self.block_device_mapping) self.assertEqual('/dev/vdc', self.swap[0]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vde', self.block_device_mapping[2]['device_name']) def test_all_together(self): # Test swap missing self.swap[0]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual('/dev/vdc', self.swap[0]['device_name']) # Test swap and eph missing self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name']) self.assertEqual('/dev/vdc', self.swap[0]['device_name']) # Test all missing self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name']) self.assertEqual('/dev/vdc', self.swap[0]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) self.assertEqual('/dev/vde', self.block_device_mapping[2]['device_name']) nova-13.1.4/nova/tests/unit/virt/libvirt/test_imagebackend.py0000664000567000056710000022546613064447152025457 0ustar jenkinsjenkins00000000000000# Copyright 2012 Grid Dynamics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
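# Unit tests for nova.virt.libvirt.imagebackend, covering the file-backed
# image backends (Raw, Qcow2): image cache behaviour, preallocation,
# resize handling, and the libvirt disk/filesystem configuration objects
# they produce.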
import base64 import inspect import os import shutil import tempfile import fixtures import mock from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils import imageutils from oslo_utils import units from oslo_utils import uuidutils from nova import context from nova import exception from nova import keymgr from nova import objects from nova import test from nova.tests.unit import fake_processutils from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.virt.image import model as imgmodel from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import imagebackend from nova.virt.libvirt.storage import rbd_utils CONF = cfg.CONF CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr') class FakeSecret(object): def value(self): return base64.b64decode("MTIzNDU2Cg==") class FakeConn(object): def secretLookupByUUIDString(self, uuid): return FakeSecret() class _ImageTestCase(object): def mock_create_image(self, image): def create_image(fn, base, size, *args, **kwargs): fn(target=base, *args, **kwargs) image.create_image = create_image def setUp(self): super(_ImageTestCase, self).setUp() self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances') self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(instances_path=self.INSTANCES_PATH) self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid()) self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH, self.INSTANCE['uuid'], 'disk.info') self.NAME = 'fake.vm' self.TEMPLATE = 'template' self.CONTEXT = context.get_admin_context() self.OLD_STYLE_INSTANCE_PATH = \ fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True) self.PATH = os.path.join( fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME) # TODO(mikal): rename template_dir to base_dir and template_path # to cached_image_path. This will be less confusing. self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base') self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template') self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def tearDown(self): super(_ImageTestCase, self).tearDown() shutil.rmtree(self.INSTANCES_PATH) def test_prealloc_image(self): CONF.set_override('preallocate_images', 'space') fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) def fake_fetch(target, *args, **kwargs): return self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE) self.stub_out('os.path.exists', lambda _: True) self.stub_out('os.access', lambda p, w: True) # Call twice to verify testing fallocate is only called once. 
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) self.assertEqual(fake_processutils.fake_execute_get_log(), ['fallocate -l 1 %s.fallocate_test' % self.PATH, 'fallocate -n -l %s %s' % (self.SIZE, self.PATH), 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)]) def test_prealloc_image_without_write_access(self): CONF.set_override('preallocate_images', 'space') fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) def fake_fetch(target, *args, **kwargs): return self.stubs.Set(image, 'check_image_exists', lambda: True) self.stubs.Set(image, '_can_fallocate', lambda: True) self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE) self.stub_out('os.path.exists', lambda _: True) self.stub_out('os.access', lambda p, w: False) # Testing fallocate is only called when user has write access. image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) self.assertEqual(fake_processutils.fake_execute_get_log(), []) def test_libvirt_fs_info(self): image = self.image_class(self.INSTANCE, self.NAME) fs = image.libvirt_fs_info("/mnt") # check that exception hasn't been raised and the method # returned correct object self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys) self.assertEqual(fs.target_dir, "/mnt") if image.is_block_dev: self.assertEqual(fs.source_type, "block") self.assertEqual(fs.source_dev, image.path) else: self.assertEqual(fs.source_type, "file") self.assertEqual(fs.source_file, image.path) def test_libvirt_info(self): image = self.image_class(self.INSTANCE, self.NAME) extra_specs = { 'quota:disk_read_bytes_sec': 10 * units.Mi, 'quota:disk_read_iops_sec': 1 * units.Ki, 'quota:disk_write_bytes_sec': 20 * units.Mi, 'quota:disk_write_iops_sec': 2 * units.Ki, 'quota:disk_total_bytes_sec': 30 * units.Mi, 'quota:disk_total_iops_sec': 3 * units.Ki, } disk = image.libvirt_info(disk_bus="virtio", disk_dev="/dev/vda", device_type="cdrom", cache_mode="none", extra_specs=extra_specs, hypervisor_version=4004001) self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk) self.assertEqual("/dev/vda", disk.target_dev) self.assertEqual("virtio", disk.target_bus) self.assertEqual("none", disk.driver_cache) self.assertEqual("cdrom", disk.source_device) self.assertEqual(10 * units.Mi, disk.disk_read_bytes_sec) self.assertEqual(1 * units.Ki, disk.disk_read_iops_sec) self.assertEqual(20 * units.Mi, disk.disk_write_bytes_sec) self.assertEqual(2 * units.Ki, disk.disk_write_iops_sec) self.assertEqual(30 * units.Mi, disk.disk_total_bytes_sec) self.assertEqual(3 * units.Ki, disk.disk_total_iops_sec) @mock.patch('nova.virt.disk.api.get_disk_size') def test_get_disk_size(self, get_disk_size): get_disk_size.return_value = 2361393152 image = self.image_class(self.INSTANCE, self.NAME) self.assertEqual(2361393152, image.get_disk_size(image.path)) get_disk_size.assert_called_once_with(image.path) class RawTestCase(_ImageTestCase, test.NoDBTestCase): SIZE = 1024 def setUp(self): self.image_class = imagebackend.Raw super(RawTestCase, self).setUp() self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None) def prepare_mocks(self): fn = self.mox.CreateMockAnything() self.mox.StubOutWithMock(imagebackend.utils.synchronized, '__call__') self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image') self.mox.StubOutWithMock(imagebackend.disk, 'extend') return fn def test_cache(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: 
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(False) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_cache_image_exists(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.cache(None, self.TEMPLATE) self.mox.VerifyAll() def test_cache_base_dir_exists(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_cache_template_exists(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(None, self.TEMPLATE) self.mox.VerifyAll() @mock.patch('os.path.exists') def test_cache_generating_resize(self, mock_path_exists): # Test for bug 1608934 # The Raw backend doesn't write to the image cache when creating a # non-image backend. Test that we don't try to get the disk size of # a non-existent backend. base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) # Lets assume the base image cache directory already exists existing = set([base_dir]) def fake_exists(path): # Return True only for files previously created during # execution. This allows us to test that we're not calling # get_disk_size() on something which hasn't been previously # created. return path in existing def fake_get_disk_size(path): # get_disk_size will explode if called on a path which doesn't # exist. Specific exception not important for this test. if path not in existing: raise AssertionError # Not important, won't actually be called by patched code. return 2 * units.Gi def fake_template(target=None, **kwargs): # The template function we pass to cache. Calling this will # cause target to be created. 
existing.add(target) mock_path_exists.side_effect = fake_exists image = self.image_class(self.INSTANCE, self.NAME) # We're not testing preallocation image.preallocate = False with test.nested( mock.patch.object(image, 'check_image_exists'), mock.patch.object(image, 'correct_format'), mock.patch.object(image, 'get_disk_size'), mock.patch.object(image, 'resize_image') ) as ( mock_disk_exists, mock_correct_format, mock_get_disk_size, mock_resize_image ): # Assume the disk doesn't already exist mock_disk_exists.return_value = False # This won't actually be executed since change I46b5658e, # but this is how the unpatched code will fail. We include this # here as a belt-and-braces sentinel. mock_get_disk_size.side_effect = fake_get_disk_size # Try to create a 2G image image.cache(fake_template, 'fake_cache_name', 2 * units.Gi) # The real assertion is that the above call to cache() didn't # raise AssertionError which, if we get here, it clearly didn't. self.assertFalse(image.resize_image.called) def test_create_image(self): fn = self.prepare_mocks() fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None) imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None) self.mox.VerifyAll() def test_create_image_generated(self): fn = self.prepare_mocks() fn(target=self.PATH) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, None) self.mox.VerifyAll() @mock.patch.object(images, 'qemu_img_info', return_value=imageutils.QemuImgInfo()) def test_create_image_extend(self, fake_qemu_img_info): fn = self.prepare_mocks() fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None) imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH) image = imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_RAW) imagebackend.disk.extend(image, self.SIZE) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None) self.mox.VerifyAll() def test_correct_format(self): self.stubs.UnsetAll() self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info') os.path.exists(self.PATH).AndReturn(True) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) info = self.mox.CreateMockAnything() info.file_format = 'foo' imagebackend.images.qemu_img_info(self.PATH).AndReturn(info) os.path.exists(CONF.instances_path).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH) self.assertEqual(image.driver_format, 'foo') self.mox.VerifyAll() @mock.patch.object(images, 'qemu_img_info', side_effect=exception.InvalidDiskInfo( reason='invalid path')) def test_resolve_driver_format(self, fake_qemu_img_info): image = self.image_class(self.INSTANCE, self.NAME) driver_format = image.resolve_driver_format() self.assertEqual(driver_format, 'raw') def test_get_model(self): image = self.image_class(self.INSTANCE, self.NAME) model = image.get_model(FakeConn()) self.assertEqual(imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_RAW), model) class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase): SIZE = units.Gi def setUp(self): self.image_class = imagebackend.Qcow2 super(Qcow2TestCase, self).setUp() self.QCOW2_BASE = (self.TEMPLATE_PATH + '_%d' % (self.SIZE / units.Gi)) def prepare_mocks(self): fn = self.mox.CreateMockAnything() 
self.mox.StubOutWithMock(imagebackend.utils.synchronized, '__call__') self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'create_cow_image') self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image') self.mox.StubOutWithMock(imagebackend.disk, 'extend') return fn def test_cache(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(CONF.instances_path).AndReturn(True) os.path.exists(self.TEMPLATE_DIR).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_cache_image_exists(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.cache(None, self.TEMPLATE) self.mox.VerifyAll() def test_cache_base_dir_exists(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_cache_template_exists(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(None, self.TEMPLATE) self.mox.VerifyAll() def test_create_image(self): fn = self.prepare_mocks() fn(max_size=None, target=self.TEMPLATE_PATH) imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH, self.PATH) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, None) self.mox.VerifyAll() def test_create_image_with_size(self): fn = self.prepare_mocks() fn(max_size=self.SIZE, target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(imagebackend.Image, 'verify_base_size') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) 
os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.PATH).AndReturn(False) imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE) imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH, self.PATH) image = imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_QCOW2) imagebackend.disk.extend(image, self.SIZE) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE) self.mox.VerifyAll() def test_create_image_too_small(self): fn = self.prepare_mocks() self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH ).AndReturn(self.SIZE) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.assertRaises(exception.FlavorDiskSmallerThanImage, image.create_image, fn, self.TEMPLATE_PATH, 1) self.mox.VerifyAll() def test_generate_resized_backing_files(self): fn = self.prepare_mocks() fn(max_size=self.SIZE, target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'get_disk_backing_file') self.mox.StubOutWithMock(imagebackend.Image, 'verify_base_size') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(CONF.instances_path).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) os.path.exists(self.PATH).AndReturn(True) imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\ .AndReturn(self.QCOW2_BASE) os.path.exists(self.QCOW2_BASE).AndReturn(False) imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE) imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.QCOW2_BASE) image = imgmodel.LocalFileImage(self.QCOW2_BASE, imgmodel.FORMAT_QCOW2) imagebackend.disk.extend(image, self.SIZE) os.path.exists(self.PATH).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE) self.mox.VerifyAll() def test_qcow2_exists_and_has_no_backing_file(self): fn = self.prepare_mocks() fn(max_size=self.SIZE, target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'get_disk_backing_file') self.mox.StubOutWithMock(imagebackend.Image, 'verify_base_size') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) os.path.exists(self.PATH).AndReturn(True) imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\ .AndReturn(None) imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE) os.path.exists(self.PATH).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE) self.mox.VerifyAll() def test_resolve_driver_format(self): image = self.image_class(self.INSTANCE, self.NAME) driver_format = image.resolve_driver_format() self.assertEqual(driver_format, 'qcow2') def test_get_model(self): image = self.image_class(self.INSTANCE, 
self.NAME) model = image.get_model(FakeConn()) self.assertEqual(imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_QCOW2), model) class LvmTestCase(_ImageTestCase, test.NoDBTestCase): VG = 'FakeVG' TEMPLATE_SIZE = 512 SIZE = 1024 def setUp(self): self.image_class = imagebackend.Lvm super(LvmTestCase, self).setUp() self.flags(images_volume_group=self.VG, group='libvirt') self.flags(enabled=False, group='ephemeral_storage_encryption') self.INSTANCE['ephemeral_key_uuid'] = None self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME) self.OLD_STYLE_INSTANCE_PATH = None self.PATH = os.path.join('/dev', self.VG, self.LV) self.disk = imagebackend.disk self.utils = imagebackend.utils self.lvm = imagebackend.lvm def prepare_mocks(self): fn = self.mox.CreateMockAnything() self.mox.StubOutWithMock(self.disk, 'resize2fs') self.mox.StubOutWithMock(self.lvm, 'create_volume') self.mox.StubOutWithMock(self.disk, 'get_disk_size') self.mox.StubOutWithMock(self.utils, 'execute') return fn def _create_image(self, sparse): fn = self.prepare_mocks() fn(max_size=None, target=self.TEMPLATE_PATH) self.lvm.create_volume(self.VG, self.LV, self.TEMPLATE_SIZE, sparse=sparse) self.disk.get_disk_size(self.TEMPLATE_PATH ).AndReturn(self.TEMPLATE_SIZE) cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH, self.PATH) self.utils.execute(*cmd, run_as_root=True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, None) self.mox.VerifyAll() def _create_image_generated(self, sparse): fn = self.prepare_mocks() self.lvm.create_volume(self.VG, self.LV, self.SIZE, sparse=sparse) fn(target=self.PATH, ephemeral_size=None) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, ephemeral_size=None) self.mox.VerifyAll() def _create_image_resize(self, sparse): fn = self.prepare_mocks() fn(max_size=self.SIZE, target=self.TEMPLATE_PATH) self.lvm.create_volume(self.VG, self.LV, self.SIZE, sparse=sparse) self.disk.get_disk_size(self.TEMPLATE_PATH ).AndReturn(self.TEMPLATE_SIZE) cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH, self.PATH) self.utils.execute(*cmd, run_as_root=True) self.disk.resize2fs(self.PATH, run_as_root=True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE) self.mox.VerifyAll() def test_cache(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(False) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_cache_image_exists(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.cache(None, self.TEMPLATE) self.mox.VerifyAll() def test_cache_base_dir_exists(self): 
self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(True) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_create_image(self): self._create_image(False) def test_create_image_sparsed(self): self.flags(sparse_logical_volumes=True, group='libvirt') self._create_image(True) def test_create_image_generated(self): self._create_image_generated(False) def test_create_image_generated_sparsed(self): self.flags(sparse_logical_volumes=True, group='libvirt') self._create_image_generated(True) def test_create_image_resize(self): self._create_image_resize(False) def test_create_image_resize_sparsed(self): self.flags(sparse_logical_volumes=True, group='libvirt') self._create_image_resize(True) def test_create_image_negative(self): fn = self.prepare_mocks() fn(max_size=self.SIZE, target=self.TEMPLATE_PATH) self.lvm.create_volume(self.VG, self.LV, self.SIZE, sparse=False ).AndRaise(RuntimeError()) self.disk.get_disk_size(self.TEMPLATE_PATH ).AndReturn(self.TEMPLATE_SIZE) self.mox.StubOutWithMock(self.lvm, 'remove_volumes') self.lvm.remove_volumes([self.PATH]) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.assertRaises(RuntimeError, image.create_image, fn, self.TEMPLATE_PATH, self.SIZE) self.mox.VerifyAll() def test_create_image_generated_negative(self): fn = self.prepare_mocks() fn(target=self.PATH, ephemeral_size=None).AndRaise(RuntimeError()) self.lvm.create_volume(self.VG, self.LV, self.SIZE, sparse=False) self.mox.StubOutWithMock(self.lvm, 'remove_volumes') self.lvm.remove_volumes([self.PATH]) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.assertRaises(RuntimeError, image.create_image, fn, self.TEMPLATE_PATH, self.SIZE, ephemeral_size=None) self.mox.VerifyAll() def test_prealloc_image(self): CONF.set_override('preallocate_images', 'space') fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) def fake_fetch(target, *args, **kwargs): return self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(image, 'check_image_exists', lambda: True) self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE) image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) self.assertEqual(fake_processutils.fake_execute_get_log(), []) class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase): VG = 'FakeVG' TEMPLATE_SIZE = 512 SIZE = 1024 def setUp(self): super(EncryptedLvmTestCase, self).setUp() self.image_class = imagebackend.Lvm self.flags(enabled=True, group='ephemeral_storage_encryption') self.flags(cipher='aes-xts-plain64', group='ephemeral_storage_encryption') self.flags(key_size=512, group='ephemeral_storage_encryption') self.flags(fixed_key='00000000000000000000000000000000' '00000000000000000000000000000000', group='keymgr') self.flags(images_volume_group=self.VG, group='libvirt') self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME) self.OLD_STYLE_INSTANCE_PATH = None self.LV_PATH = os.path.join('/dev', self.VG, self.LV) self.PATH = os.path.join('/dev/mapper', 
imagebackend.dmcrypt.volume_name(self.LV)) self.key_manager = keymgr.API() self.INSTANCE['ephemeral_key_uuid'] =\ self.key_manager.create_key(self.CONTEXT) self.KEY = self.key_manager.get_key(self.CONTEXT, self.INSTANCE['ephemeral_key_uuid']).get_encoded() self.lvm = imagebackend.lvm self.disk = imagebackend.disk self.utils = imagebackend.utils self.libvirt_utils = imagebackend.libvirt_utils self.dmcrypt = imagebackend.dmcrypt def _create_image(self, sparse): with test.nested( mock.patch.object(self.lvm, 'create_volume', mock.Mock()), mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()), mock.patch.object(self.disk, 'resize2fs', mock.Mock()), mock.patch.object(self.disk, 'get_disk_size', mock.Mock(return_value=self.TEMPLATE_SIZE)), mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()), mock.patch.object(self.libvirt_utils, 'create_lvm_image', mock.Mock()), mock.patch.object(self.libvirt_utils, 'remove_logical_volumes', mock.Mock()), mock.patch.object(self.utils, 'execute', mock.Mock())): fn = mock.Mock() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE, context=self.CONTEXT) fn.assert_called_with(context=self.CONTEXT, max_size=self.TEMPLATE_SIZE, target=self.TEMPLATE_PATH) self.lvm.create_volume.assert_called_with(self.VG, self.LV, self.TEMPLATE_SIZE, sparse=sparse) self.dmcrypt.create_volume.assert_called_with( self.PATH.rpartition('/')[2], self.LV_PATH, CONF.ephemeral_storage_encryption.cipher, CONF.ephemeral_storage_encryption.key_size, self.KEY) cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH, self.PATH) self.utils.execute.assert_called_with(*cmd, run_as_root=True) def _create_image_generated(self, sparse): with test.nested( mock.patch.object(self.lvm, 'create_volume', mock.Mock()), mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()), mock.patch.object(self.disk, 'resize2fs', mock.Mock()), mock.patch.object(self.disk, 'get_disk_size', mock.Mock(return_value=self.TEMPLATE_SIZE)), mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()), mock.patch.object(self.libvirt_utils, 'create_lvm_image', mock.Mock()), mock.patch.object(self.libvirt_utils, 'remove_logical_volumes', mock.Mock()), mock.patch.object(self.utils, 'execute', mock.Mock())): fn = mock.Mock() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, ephemeral_size=None, context=self.CONTEXT) self.lvm.create_volume.assert_called_with( self.VG, self.LV, self.SIZE, sparse=sparse) self.dmcrypt.create_volume.assert_called_with( self.PATH.rpartition('/')[2], self.LV_PATH, CONF.ephemeral_storage_encryption.cipher, CONF.ephemeral_storage_encryption.key_size, self.KEY) fn.assert_called_with(target=self.PATH, ephemeral_size=None, context=self.CONTEXT) def _create_image_resize(self, sparse): with test.nested( mock.patch.object(self.lvm, 'create_volume', mock.Mock()), mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()), mock.patch.object(self.disk, 'resize2fs', mock.Mock()), mock.patch.object(self.disk, 'get_disk_size', mock.Mock(return_value=self.TEMPLATE_SIZE)), mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'list_volumes', 
mock.Mock()), mock.patch.object(self.libvirt_utils, 'create_lvm_image', mock.Mock()), mock.patch.object(self.libvirt_utils, 'remove_logical_volumes', mock.Mock()), mock.patch.object(self.utils, 'execute', mock.Mock())): fn = mock.Mock() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, context=self.CONTEXT) fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE, target=self.TEMPLATE_PATH) self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH) self.lvm.create_volume.assert_called_with( self.VG, self.LV, self.SIZE, sparse=sparse) self.dmcrypt.create_volume.assert_called_with( self.PATH.rpartition('/')[2], self.LV_PATH, CONF.ephemeral_storage_encryption.cipher, CONF.ephemeral_storage_encryption.key_size, self.KEY) cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH, self.PATH) self.utils.execute.assert_called_with(*cmd, run_as_root=True) self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True) def test_create_image(self): self._create_image(False) def test_create_image_sparsed(self): self.flags(sparse_logical_volumes=True, group='libvirt') self._create_image(True) def test_create_image_generated(self): self._create_image_generated(False) def test_create_image_generated_sparsed(self): self.flags(sparse_logical_volumes=True, group='libvirt') self._create_image_generated(True) def test_create_image_resize(self): self._create_image_resize(False) def test_create_image_resize_sparsed(self): self.flags(sparse_logical_volumes=True, group='libvirt') self._create_image_resize(True) def test_create_image_negative(self): with test.nested( mock.patch.object(self.lvm, 'create_volume', mock.Mock()), mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()), mock.patch.object(self.disk, 'resize2fs', mock.Mock()), mock.patch.object(self.disk, 'get_disk_size', mock.Mock(return_value=self.TEMPLATE_SIZE)), mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()), mock.patch.object(self.libvirt_utils, 'create_lvm_image', mock.Mock()), mock.patch.object(self.libvirt_utils, 'remove_logical_volumes', mock.Mock()), mock.patch.object(self.utils, 'execute', mock.Mock())): fn = mock.Mock() self.lvm.create_volume.side_effect = RuntimeError() image = self.image_class(self.INSTANCE, self.NAME) self.assertRaises( RuntimeError, image.create_image, fn, self.TEMPLATE_PATH, self.SIZE, context=self.CONTEXT) fn.assert_called_with( context=self.CONTEXT, max_size=self.SIZE, target=self.TEMPLATE_PATH) self.disk.get_disk_size.assert_called_with( self.TEMPLATE_PATH) self.lvm.create_volume.assert_called_with( self.VG, self.LV, self.SIZE, sparse=False) self.dmcrypt.delete_volume.assert_called_with( self.PATH.rpartition('/')[2]) self.lvm.remove_volumes.assert_called_with([self.LV_PATH]) def test_create_image_encrypt_negative(self): with test.nested( mock.patch.object(self.lvm, 'create_volume', mock.Mock()), mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()), mock.patch.object(self.disk, 'resize2fs', mock.Mock()), mock.patch.object(self.disk, 'get_disk_size', mock.Mock(return_value=self.TEMPLATE_SIZE)), mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()), mock.patch.object(self.libvirt_utils, 'create_lvm_image', mock.Mock()), mock.patch.object(self.libvirt_utils, 
'remove_logical_volumes', mock.Mock()), mock.patch.object(self.utils, 'execute', mock.Mock())): fn = mock.Mock() self.dmcrypt.create_volume.side_effect = RuntimeError() image = self.image_class(self.INSTANCE, self.NAME) self.assertRaises( RuntimeError, image.create_image, fn, self.TEMPLATE_PATH, self.SIZE, context=self.CONTEXT) fn.assert_called_with( context=self.CONTEXT, max_size=self.SIZE, target=self.TEMPLATE_PATH) self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH) self.lvm.create_volume.assert_called_with( self.VG, self.LV, self.SIZE, sparse=False) self.dmcrypt.create_volume.assert_called_with( self.dmcrypt.volume_name(self.LV), self.LV_PATH, CONF.ephemeral_storage_encryption.cipher, CONF.ephemeral_storage_encryption.key_size, self.KEY) self.dmcrypt.delete_volume.assert_called_with( self.PATH.rpartition('/')[2]) self.lvm.remove_volumes.assert_called_with([self.LV_PATH]) def test_create_image_generated_negative(self): with test.nested( mock.patch.object(self.lvm, 'create_volume', mock.Mock()), mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()), mock.patch.object(self.disk, 'resize2fs', mock.Mock()), mock.patch.object(self.disk, 'get_disk_size', mock.Mock(return_value=self.TEMPLATE_SIZE)), mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()), mock.patch.object(self.libvirt_utils, 'create_lvm_image', mock.Mock()), mock.patch.object(self.libvirt_utils, 'remove_logical_volumes', mock.Mock()), mock.patch.object(self.utils, 'execute', mock.Mock())): fn = mock.Mock() fn.side_effect = RuntimeError() image = self.image_class(self.INSTANCE, self.NAME) self.assertRaises(RuntimeError, image.create_image, fn, self.TEMPLATE_PATH, self.SIZE, ephemeral_size=None, context=self.CONTEXT) self.lvm.create_volume.assert_called_with( self.VG, self.LV, self.SIZE, sparse=False) self.dmcrypt.create_volume.assert_called_with( self.PATH.rpartition('/')[2], self.LV_PATH, CONF.ephemeral_storage_encryption.cipher, CONF.ephemeral_storage_encryption.key_size, self.KEY) fn.assert_called_with( target=self.PATH, ephemeral_size=None, context=self.CONTEXT) self.dmcrypt.delete_volume.assert_called_with( self.PATH.rpartition('/')[2]) self.lvm.remove_volumes.assert_called_with([self.LV_PATH]) def test_create_image_generated_encrypt_negative(self): with test.nested( mock.patch.object(self.lvm, 'create_volume', mock.Mock()), mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()), mock.patch.object(self.disk, 'resize2fs', mock.Mock()), mock.patch.object(self.disk, 'get_disk_size', mock.Mock(return_value=self.TEMPLATE_SIZE)), mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()), mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()), mock.patch.object(self.libvirt_utils, 'create_lvm_image', mock.Mock()), mock.patch.object(self.libvirt_utils, 'remove_logical_volumes', mock.Mock()), mock.patch.object(self.utils, 'execute', mock.Mock())): fn = mock.Mock() fn.side_effect = RuntimeError() image = self.image_class(self.INSTANCE, self.NAME) self.assertRaises( RuntimeError, image.create_image, fn, self.TEMPLATE_PATH, self.SIZE, ephemeral_size=None, context=self.CONTEXT) self.lvm.create_volume.assert_called_with( self.VG, self.LV, self.SIZE, sparse=False) self.dmcrypt.create_volume.assert_called_with( self.PATH.rpartition('/')[2], self.LV_PATH, CONF.ephemeral_storage_encryption.cipher, 
CONF.ephemeral_storage_encryption.key_size, self.KEY) self.dmcrypt.delete_volume.assert_called_with( self.PATH.rpartition('/')[2]) self.lvm.remove_volumes.assert_called_with([self.LV_PATH]) def test_prealloc_image(self): self.flags(preallocate_images='space') fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) def fake_fetch(target, *args, **kwargs): return self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(image, 'check_image_exists', lambda: True) self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE) image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) self.assertEqual(fake_processutils.fake_execute_get_log(), []) def test_get_model(self): image = self.image_class(self.INSTANCE, self.NAME) model = image.get_model(FakeConn()) self.assertEqual(imgmodel.LocalBlockImage(self.PATH), model) class RbdTestCase(_ImageTestCase, test.NoDBTestCase): FSID = "FakeFsID" POOL = "FakePool" USER = "FakeUser" CONF = "FakeConf" SIZE = 1024 def setUp(self): self.image_class = imagebackend.Rbd super(RbdTestCase, self).setUp() self.flags(images_rbd_pool=self.POOL, rbd_user=self.USER, images_rbd_ceph_conf=self.CONF, group='libvirt') self.libvirt_utils = imagebackend.libvirt_utils self.utils = imagebackend.utils self.mox.StubOutWithMock(rbd_utils, 'rbd') self.mox.StubOutWithMock(rbd_utils, 'rados') def test_cache(self): image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(image, 'check_image_exists') os.path.exists(self.TEMPLATE_DIR).AndReturn(False) image.check_image_exists().AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR) self.mox.ReplayAll() self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_cache_base_dir_exists(self): fn = self.mox.CreateMockAnything() image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(image, 'check_image_exists') os.path.exists(self.TEMPLATE_DIR).AndReturn(True) image.check_image_exists().AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') self.mox.ReplayAll() self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_cache_image_exists(self): image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(image, 'check_image_exists') os.path.exists(self.TEMPLATE_DIR).AndReturn(True) image.check_image_exists().AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) self.mox.ReplayAll() image.cache(None, self.TEMPLATE) self.mox.VerifyAll() def test_cache_template_exists(self): image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(image, 'check_image_exists') os.path.exists(self.TEMPLATE_DIR).AndReturn(True) image.check_image_exists().AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) self.mox.ReplayAll() self.mock_create_image(image) image.cache(None, self.TEMPLATE) self.mox.VerifyAll() def test_create_image(self): fn = self.mox.CreateMockAnything() fn(max_size=None, 
target=self.TEMPLATE_PATH) rbd_utils.rbd.RBD_FEATURE_LAYERING = 1 fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(image, 'check_image_exists') image.check_image_exists().AndReturn(False) image.check_image_exists().AndReturn(False) self.mox.ReplayAll() image.create_image(fn, self.TEMPLATE_PATH, None) rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME) cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH, rbd_name, '--image-format=2', '--id', self.USER, '--conf', self.CONF) self.assertEqual(fake_processutils.fake_execute_get_log(), [' '.join(cmd)]) self.mox.VerifyAll() def test_create_image_resize(self): fn = self.mox.CreateMockAnything() full_size = self.SIZE * 2 fn(max_size=full_size, target=self.TEMPLATE_PATH) rbd_utils.rbd.RBD_FEATURE_LAYERING = 1 fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(image, 'check_image_exists') image.check_image_exists().AndReturn(False) image.check_image_exists().AndReturn(False) rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME) cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH, rbd_name, '--image-format=2', '--id', self.USER, '--conf', self.CONF) self.mox.StubOutWithMock(image, 'get_disk_size') image.get_disk_size(rbd_name).AndReturn(self.SIZE) self.mox.StubOutWithMock(image.driver, 'resize') image.driver.resize(rbd_name, full_size) self.mox.StubOutWithMock(image, 'verify_base_size') image.verify_base_size(self.TEMPLATE_PATH, full_size) self.mox.ReplayAll() image.create_image(fn, self.TEMPLATE_PATH, full_size) self.assertEqual(fake_processutils.fake_execute_get_log(), [' '.join(cmd)]) self.mox.VerifyAll() def test_create_image_already_exists(self): rbd_utils.rbd.RBD_FEATURE_LAYERING = 1 image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(image, 'check_image_exists') image.check_image_exists().AndReturn(True) self.mox.StubOutWithMock(image, 'get_disk_size') image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE) image.check_image_exists().AndReturn(True) rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME) image.get_disk_size(rbd_name).AndReturn(self.SIZE) self.mox.ReplayAll() fn = self.mox.CreateMockAnything() image.create_image(fn, self.TEMPLATE_PATH, self.SIZE) self.mox.VerifyAll() def test_prealloc_image(self): CONF.set_override('preallocate_images', 'space') fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) def fake_fetch(target, *args, **kwargs): return self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(image, 'check_image_exists', lambda: True) self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE) image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) self.assertEqual(fake_processutils.fake_execute_get_log(), []) def test_parent_compatible(self): self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info), inspect.getargspec(self.image_class.libvirt_info)) def test_image_path(self): conf = "FakeConf" pool = "FakePool" user = "FakeUser" self.flags(images_rbd_pool=pool, group='libvirt') self.flags(images_rbd_ceph_conf=conf, group='libvirt') self.flags(rbd_user=user, group='libvirt') image = self.image_class(self.INSTANCE, self.NAME) rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name, user, 
conf) self.assertEqual(image.path, rbd_path) def test_get_disk_size(self): image = self.image_class(self.INSTANCE, self.NAME) with mock.patch.object(image.driver, 'size') as size_mock: size_mock.return_value = 2361393152 self.assertEqual(2361393152, image.get_disk_size(image.path)) size_mock.assert_called_once_with(image.rbd_name) def test_create_image_too_small(self): image = self.image_class(self.INSTANCE, self.NAME) with mock.patch.object(image, 'driver') as driver_mock: driver_mock.exists.return_value = True driver_mock.size.return_value = 2 self.assertRaises(exception.FlavorDiskSmallerThanImage, image.create_image, mock.MagicMock(), self.TEMPLATE_PATH, 1) driver_mock.size.assert_called_once_with(image.rbd_name) @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs") def test_libvirt_info(self, mock_mon_addrs): def get_mon_addrs(): hosts = ["server1", "server2"] ports = ["1899", "1920"] return hosts, ports mock_mon_addrs.side_effect = get_mon_addrs super(RbdTestCase, self).test_libvirt_info() @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs") def test_get_model(self, mock_mon_addrs): pool = "FakePool" user = "FakeUser" self.flags(images_rbd_pool=pool, group='libvirt') self.flags(rbd_user=user, group='libvirt') self.flags(rbd_secret_uuid="3306a5c4-8378-4b3c-aa1f-7b48d3a26172", group='libvirt') def get_mon_addrs(): hosts = ["server1", "server2"] ports = ["1899", "1920"] return hosts, ports mock_mon_addrs.side_effect = get_mon_addrs image = self.image_class(self.INSTANCE, self.NAME) model = image.get_model(FakeConn()) self.assertEqual(imgmodel.RBDImage( self.INSTANCE["uuid"] + "_fake.vm", "FakePool", "FakeUser", "MTIzNDU2Cg==", ["server1:1899", "server2:1920"]), model) def test_import_file(self): image = self.image_class(self.INSTANCE, self.NAME) @mock.patch.object(image, 'check_image_exists') @mock.patch.object(image.driver, 'remove_image') @mock.patch.object(image.driver, 'import_image') def _test(mock_import, mock_remove, mock_exists): mock_exists.return_value = True image.import_file(self.INSTANCE, mock.sentinel.file, mock.sentinel.remote_name) name = '%s_%s' % (self.INSTANCE.uuid, mock.sentinel.remote_name) mock_exists.assert_called_once_with() mock_remove.assert_called_once_with(name) mock_import.assert_called_once_with(mock.sentinel.file, name) _test() def test_import_file_not_found(self): image = self.image_class(self.INSTANCE, self.NAME) @mock.patch.object(image, 'check_image_exists') @mock.patch.object(image.driver, 'remove_image') @mock.patch.object(image.driver, 'import_image') def _test(mock_import, mock_remove, mock_exists): mock_exists.return_value = False image.import_file(self.INSTANCE, mock.sentinel.file, mock.sentinel.remote_name) name = '%s_%s' % (self.INSTANCE.uuid, mock.sentinel.remote_name) mock_exists.assert_called_once_with() self.assertFalse(mock_remove.called) mock_import.assert_called_once_with(mock.sentinel.file, name) _test() def test_get_parent_pool(self): image = self.image_class(self.INSTANCE, self.NAME) with mock.patch.object(rbd_utils.RBDDriver, 'parent_info') as mock_pi: mock_pi.return_value = [self.POOL, 'fake-image', 'fake-snap'] parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image', self.FSID) self.assertEqual(self.POOL, parent_pool) def test_get_parent_pool_no_parent_info(self): image = self.image_class(self.INSTANCE, self.NAME) rbd_uri = 'rbd://%s/%s/fake-image/fake-snap' % (self.FSID, self.POOL) with test.nested(mock.patch.object(rbd_utils.RBDDriver, 'parent_info'), mock.patch.object(imagebackend.IMAGE_API, 'get'), ) as (mock_pi, 
mock_get): mock_pi.side_effect = exception.ImageUnacceptable(image_id='test', reason='test') mock_get.return_value = {'locations': [{'url': rbd_uri}]} parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image', self.FSID) self.assertEqual(self.POOL, parent_pool) def test_get_parent_pool_non_local_image(self): image = self.image_class(self.INSTANCE, self.NAME) rbd_uri = 'rbd://remote-cluster/remote-pool/fake-image/fake-snap' with test.nested( mock.patch.object(rbd_utils.RBDDriver, 'parent_info'), mock.patch.object(imagebackend.IMAGE_API, 'get') ) as (mock_pi, mock_get): mock_pi.side_effect = exception.ImageUnacceptable(image_id='test', reason='test') mock_get.return_value = {'locations': [{'url': rbd_uri}]} self.assertRaises(exception.ImageUnacceptable, image._get_parent_pool, self.CONTEXT, 'fake-image', self.FSID) def test_direct_snapshot(self): image = self.image_class(self.INSTANCE, self.NAME) test_snap = 'rbd://%s/%s/fake-image-id/snap' % (self.FSID, self.POOL) with test.nested( mock.patch.object(rbd_utils.RBDDriver, 'get_fsid', return_value=self.FSID), mock.patch.object(image, '_get_parent_pool', return_value=self.POOL), mock.patch.object(rbd_utils.RBDDriver, 'create_snap'), mock.patch.object(rbd_utils.RBDDriver, 'clone'), mock.patch.object(rbd_utils.RBDDriver, 'flatten'), mock.patch.object(image, 'cleanup_direct_snapshot') ) as (mock_fsid, mock_parent, mock_create_snap, mock_clone, mock_flatten, mock_cleanup): location = image.direct_snapshot(self.CONTEXT, 'fake-snapshot', 'fake-format', 'fake-image-id', 'fake-base-image') mock_fsid.assert_called_once_with() mock_parent.assert_called_once_with(self.CONTEXT, 'fake-base-image', self.FSID) mock_create_snap.assert_has_calls([mock.call(image.rbd_name, 'fake-snapshot', protect=True), mock.call('fake-image-id', 'snap', pool=self.POOL, protect=True)]) mock_clone.assert_called_once_with(mock.ANY, 'fake-image-id', dest_pool=self.POOL) mock_flatten.assert_called_once_with('fake-image-id', pool=self.POOL) mock_cleanup.assert_called_once_with(mock.ANY) self.assertEqual(test_snap, location) def test_direct_snapshot_cleans_up_on_failures(self): image = self.image_class(self.INSTANCE, self.NAME) test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool, image.rbd_name) with test.nested( mock.patch.object(rbd_utils.RBDDriver, 'get_fsid', return_value=self.FSID), mock.patch.object(image, '_get_parent_pool', return_value=self.POOL), mock.patch.object(rbd_utils.RBDDriver, 'create_snap'), mock.patch.object(rbd_utils.RBDDriver, 'clone', side_effect=exception.Forbidden('testing')), mock.patch.object(rbd_utils.RBDDriver, 'flatten'), mock.patch.object(image, 'cleanup_direct_snapshot')) as ( mock_fsid, mock_parent, mock_create_snap, mock_clone, mock_flatten, mock_cleanup): self.assertRaises(exception.Forbidden, image.direct_snapshot, self.CONTEXT, 'snap', 'fake-format', 'fake-image-id', 'fake-base-image') mock_create_snap.assert_called_once_with(image.rbd_name, 'snap', protect=True) self.assertFalse(mock_flatten.called) mock_cleanup.assert_called_once_with(dict(url=test_snap)) def test_cleanup_direct_snapshot(self): image = self.image_class(self.INSTANCE, self.NAME) test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool, image.rbd_name) with test.nested( mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'), mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume') ) as (mock_rm, mock_destroy): # Ensure that the method does nothing when no location is provided image.cleanup_direct_snapshot(None) self.assertFalse(mock_rm.called) # Ensure that 
destroy_volume is not called image.cleanup_direct_snapshot(dict(url=test_snap)) mock_rm.assert_called_once_with(image.rbd_name, 'snap', force=True, ignore_errors=False, pool=image.pool) self.assertFalse(mock_destroy.called) def test_cleanup_direct_snapshot_destroy_volume(self): image = self.image_class(self.INSTANCE, self.NAME) test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool, image.rbd_name) with test.nested( mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'), mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume') ) as (mock_rm, mock_destroy): # Ensure that destroy_volume is called image.cleanup_direct_snapshot(dict(url=test_snap), also_destroy_volume=True) mock_rm.assert_called_once_with(image.rbd_name, 'snap', force=True, ignore_errors=False, pool=image.pool) mock_destroy.assert_called_once_with(image.rbd_name, pool=image.pool) class PloopTestCase(_ImageTestCase, test.NoDBTestCase): SIZE = 1024 def setUp(self): self.image_class = imagebackend.Ploop super(PloopTestCase, self).setUp() self.utils = imagebackend.utils def prepare_mocks(self): fn = self.mox.CreateMockAnything() self.mox.StubOutWithMock(imagebackend.utils.synchronized, '__call__') self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image') self.mox.StubOutWithMock(self.utils, 'execute') return fn def test_cache(self): self.mox.StubOutWithMock(os.path, 'exists') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_DIR).AndReturn(False) os.path.exists(self.PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) fn = self.mox.CreateMockAnything() fn(target=self.TEMPLATE_PATH) self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree') imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) self.mock_create_image(image) image.cache(fn, self.TEMPLATE) self.mox.VerifyAll() def test_create_image(self): self.stubs.Set(imagebackend.Ploop, 'get_disk_size', lambda a, b: 2048) fn = self.prepare_mocks() fn(target=self.TEMPLATE_PATH, max_size=2048, image_id=None) img_path = os.path.join(self.PATH, "root.hds") imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, img_path) self.utils.execute("ploop", "restore-descriptor", "-f", "raw", self.PATH, img_path) self.utils.execute("ploop", "grow", '-s', "2K", os.path.join(self.PATH, "DiskDescriptor.xml"), run_as_root=True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None) self.mox.VerifyAll() def test_prealloc_image(self): self.flags(preallocate_images='space') fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) def fake_fetch(target, *args, **kwargs): return self.stub_out('os.path.exists', lambda _: True) self.stubs.Set(image, 'check_image_exists', lambda: True) self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE) image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE) class BackendTestCase(test.NoDBTestCase): INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid()) NAME = 'fake-name.suffix' def setUp(self): super(BackendTestCase, self).setUp() self.flags(enabled=False, group='ephemeral_storage_encryption') self.INSTANCE['ephemeral_key_uuid'] = None def get_image(self, use_cow, image_type): return imagebackend.Backend(use_cow).image(self.INSTANCE, self.NAME, image_type) def _test_image(self, image_type, 
image_not_cow, image_cow): image1 = self.get_image(False, image_type) image2 = self.get_image(True, image_type) def assertIsInstance(instance, class_object): failure = ('Expected %s,' + ' but got %s.') % (class_object.__name__, instance.__class__.__name__) self.assertIsInstance(instance, class_object, msg=failure) assertIsInstance(image1, image_not_cow) assertIsInstance(image2, image_cow) def test_image_raw(self): self._test_image('raw', imagebackend.Raw, imagebackend.Raw) def test_image_raw_preallocate_images(self): flags = ('space', 'Space', 'SPACE') for f in flags: self.flags(preallocate_images=f) raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz') self.assertTrue(raw.preallocate) def test_image_raw_preallocate_images_bad_conf(self): self.flags(preallocate_images='space1') raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz') self.assertFalse(raw.preallocate) def test_image_raw_native_io(self): self.flags(preallocate_images="space") raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz') self.assertEqual(raw.driver_io, "native") def test_image_qcow2(self): self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2) def test_image_qcow2_preallocate_images(self): flags = ('space', 'Space', 'SPACE') for f in flags: self.flags(preallocate_images=f) qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz') self.assertTrue(qcow.preallocate) def test_image_qcow2_preallocate_images_bad_conf(self): self.flags(preallocate_images='space1') qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz') self.assertFalse(qcow.preallocate) def test_image_qcow2_native_io(self): self.flags(preallocate_images="space") qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz') self.assertEqual(qcow.driver_io, "native") def test_image_lvm_native_io(self): def _test_native_io(is_sparse, driver_io): self.flags(images_volume_group='FakeVG', group='libvirt') self.flags(sparse_logical_volumes=is_sparse, group='libvirt') lvm = imagebackend.Lvm(self.INSTANCE, 'fake_disk') self.assertEqual(lvm.driver_io, driver_io) _test_native_io(is_sparse=False, driver_io="native") _test_native_io(is_sparse=True, driver_io=None) def test_image_lvm(self): self.flags(images_volume_group='FakeVG', group='libvirt') self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm) def test_image_rbd(self): conf = "FakeConf" pool = "FakePool" self.flags(images_rbd_pool=pool, group='libvirt') self.flags(images_rbd_ceph_conf=conf, group='libvirt') self.mox.StubOutWithMock(rbd_utils, 'rbd') self.mox.StubOutWithMock(rbd_utils, 'rados') self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd) def test_image_default(self): self._test_image('default', imagebackend.Raw, imagebackend.Qcow2) nova-13.1.4/nova/tests/unit/virt/libvirt/test_compat.py0000664000567000056710000000474713064447140024342 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
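# NOTE(editorial sketch): the imagebackend tests above all follow the mox
# record/replay/verify cycle: expectations such as
# os.path.exists(...).AndReturn(False) are recorded on stubs, ReplayAll()
# switches the stubs into replay mode, the code under test is exercised, and
# VerifyAll() asserts that every recorded expectation was met. The helper
# below is a minimal, self-contained illustration of that cycle only; it is
# not called by any test, and the stubbed path and return value are
# arbitrary placeholders.


def _mox_record_replay_verify_sketch():
    import os.path

    from mox3 import mox

    m = mox.Mox()
    # Record phase: declare the expected call and its canned return value.
    m.StubOutWithMock(os.path, 'exists')
    os.path.exists('/fake/template/dir').AndReturn(False)
    m.ReplayAll()
    try:
        # Exercise phase: the code under test makes exactly the recorded call.
        assert os.path.exists('/fake/template/dir') is False
        # Verify phase: fails if any recorded expectation was never satisfied.
        m.VerifyAll()
    finally:
        m.UnsetStubs()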
import mock from nova.compute import power_state from nova import test from nova.tests.unit.virt.libvirt import fakelibvirt from nova.virt.libvirt import compat from nova.virt.libvirt import host class CompatTestCase(test.NoDBTestCase): def setUp(self): super(CompatTestCase, self).setUp() self.useFixture(fakelibvirt.FakeLibvirtFixture()) @mock.patch.object(host.Host, 'has_min_version') def test_get_domain_info(self, mock_has_min_version): test_host = host.Host("qemu:///system") domain = mock.MagicMock() expected = [power_state.RUNNING, 512, 512, None, None] race = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'ERR', error_code=fakelibvirt.VIR_ERR_OPERATION_FAILED, error_message='cannot read cputime for domain') mock_has_min_version.return_value = True domain.info.return_value = expected actual = compat.get_domain_info(fakelibvirt, test_host, domain) self.assertEqual(actual, expected) self.assertEqual(domain.info.call_count, 1) domain.info.reset_mock() domain.info.side_effect = race self.assertRaises(fakelibvirt.libvirtError, compat.get_domain_info, fakelibvirt, test_host, domain) self.assertEqual(domain.info.call_count, 1) domain.info.reset_mock() mock_has_min_version.return_value = False domain.info.side_effect = [race, expected] actual = compat.get_domain_info(fakelibvirt, test_host, domain) self.assertEqual(actual, expected) self.assertEqual(domain.info.call_count, 2) domain.info.reset_mock() domain.info.side_effect = race self.assertRaises(fakelibvirt.libvirtError, compat.get_domain_info, fakelibvirt, test_host, domain) self.assertEqual(domain.info.call_count, 2) nova-13.1.4/nova/tests/unit/virt/libvirt/test_driver.py0000664000567000056710000276367713064447152024375 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
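# NOTE(editorial sketch): CompatTestCase above drives its retry scenarios by
# assigning a list to a mock's side_effect: each element is consumed per
# call, and an element that is an exception instance is raised instead of
# returned. That is how a single mock models "the first call hits the race
# and fails, the retry succeeds". The helper below illustrates that mock
# feature only; it is not called by any test, and the names and values are
# made up.


def _mock_side_effect_sequence_sketch():
    import mock

    fetch_info = mock.Mock()
    # First call raises, second call returns a value, mirroring the
    # race-then-retry sequence exercised in CompatTestCase.
    fetch_info.side_effect = [IOError('transient race'), 'domain-info']
    try:
        fetch_info()
    except IOError:
        pass
    assert fetch_info() == 'domain-info'
    assert fetch_info.call_count == 2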
import contextlib import copy import datetime import errno import glob import os import random import re import shutil import signal import threading import time import uuid import eventlet from eventlet import greenthread import fixtures from lxml import etree import mock from mox3 import mox from os_brick.initiator import connector from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import fileutils from oslo_utils import fixture as utils_fixture from oslo_utils import importutils from oslo_utils import units from oslo_utils import uuidutils from oslo_utils import versionutils import six from six.moves import builtins from six.moves import range from nova.api.metadata import base as instance_metadata from nova.compute import arch from nova.compute import cpumodel from nova.compute import manager from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states from nova import context from nova import db from nova import exception from nova.network import model as network_model from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import fields from nova.pci import manager as pci_manager from nova.pci import utils as pci_utils from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network import nova.tests.unit.image.fake from nova.tests.unit import matchers from nova.tests.unit.objects import test_pci_device from nova.tests.unit.objects import test_vcpu_model from nova.tests.unit.virt.libvirt import fake_imagebackend from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt from nova import utils from nova import version from nova.virt import block_device as driver_block_device from nova.virt.disk import api as disk from nova.virt import driver from nova.virt import fake from nova.virt import firewall as base_firewall from nova.virt import hardware from nova.virt.image import model as imgmodel from nova.virt import images from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import firewall from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host from nova.virt.libvirt import imagebackend from nova.virt.libvirt.storage import dmcrypt from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import volume as volume_drivers libvirt_driver.libvirt = fakelibvirt host.libvirt = fakelibvirt libvirt_guest.libvirt = fakelibvirt CONF = cfg.CONF CONF.import_opt('compute_manager', 'nova.service') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('instances_path', 'nova.compute.manager') _fake_network_info = fake_network.fake_get_instance_nw_info _fake_NodeDevXml = \ {"pci_0000_04_00_3": """ pci_0000_04_00_3 pci_0000_00_01_1 igb 0 4 0 3 I350 Gigabit Network Connection Intel Corporation

""", "pci_0000_04_10_7": """ pci_0000_04_10_7 pci_0000_00_01_1 igbvf 0 4 16 7 I350 Ethernet Controller Virtual Function Intel Corporation
""", "pci_0000_04_11_7": """ pci_0000_04_11_7 pci_0000_00_01_1 igbvf 0 4 17 7 I350 Ethernet Controller Virtual Function Intel Corporation
""", "pci_0000_04_00_1": """ pci_0000_04_00_1 /sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1 pci_0000_00_02_0 mlx5_core 0 4 0 1 MT27700 Family [ConnectX-4] Mellanox Technologies
""", # libvirt >= 1.3.0 nodedev-dumpxml "pci_0000_03_00_0": """ pci_0000_03_00_0 /sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0 pci_0000_00_02_0 mlx5_core 0 3 0 0 MT27700 Family [ConnectX-4] Mellanox Technologies
""", "pci_0000_03_00_1": """ pci_0000_03_00_1 /sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1 pci_0000_00_02_0 mlx5_core 0 3 0 1 MT27700 Family [ConnectX-4] Mellanox Technologies
""", } _fake_cpu_info = { "arch": "test_arch", "model": "test_model", "vendor": "test_vendor", "topology": { "sockets": 1, "cores": 8, "threads": 16 }, "features": ["feature1", "feature2"] } def _concurrency(signal, wait, done, target, is_block_dev=False): signal.send() wait.wait() done.send() class FakeVirtDomain(object): def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None): if uuidstr is None: uuidstr = str(uuid.uuid4()) self.uuidstr = uuidstr self.id = id self.domname = name self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi, None, None] if fake_xml: self._fake_dom_xml = fake_xml else: self._fake_dom_xml = """ """ def name(self): if self.domname is None: return "fake-domain %s" % self else: return self.domname def ID(self): return self.id def info(self): return self._info def create(self): pass def managedSave(self, *args): pass def createWithFlags(self, launch_flags): pass def XMLDesc(self, flags): return self._fake_dom_xml def UUIDString(self): return self.uuidstr def attachDeviceFlags(self, xml, flags): pass def attachDevice(self, xml): pass def detachDeviceFlags(self, xml, flags): pass def snapshotCreateXML(self, xml, flags): pass def blockCommit(self, disk, base, top, bandwidth=0, flags=0): pass def blockRebase(self, disk, base, bandwidth=0, flags=0): pass def blockJobInfo(self, path, flags): pass def resume(self): pass def destroy(self): pass def fsFreeze(self, disks=None, flags=0): pass def fsThaw(self, disks=None, flags=0): pass def isActive(self): return True class CacheConcurrencyTestCase(test.NoDBTestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) # utils.synchronized() will create the lock_path for us if it # doesn't already exist. It will also delete it when it's done, # which can cause race conditions with the multiple threads we # use for tests. So, create the path here so utils.synchronized() # won't delete it out from under one of the threads. self.lock_path = os.path.join(CONF.instances_path, 'locks') fileutils.ensure_tree(self.lock_path) def fake_exists(fname): basedir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if fname == basedir or fname == self.lock_path: return True return False def fake_execute(*args, **kwargs): pass def fake_extend(image, size, use_cow=False): pass self.stub_out('os.path.exists', fake_exists) self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(imagebackend.disk, 'extend', fake_extend) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def _fake_instance(self, uuid): return objects.Instance(id=1, uuid=uuid) def test_same_fname_concurrency(self): # Ensures that the same fname cache runs at a sequentially. uuid = uuidutils.generate_uuid() backend = imagebackend.Backend(False) wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = eventlet.event.Event() thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) # Thread 1 should run before thread 2. 
sig1.wait() wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname', None, signal=sig2, wait=wait2, done=done2) wait2.send() eventlet.sleep(0) try: self.assertFalse(done2.ready()) finally: wait1.send() done1.wait() eventlet.sleep(0) self.assertTrue(done2.ready()) # Wait on greenthreads to assert they didn't raise exceptions # during execution thr1.wait() thr2.wait() def test_different_fname_concurrency(self): # Ensures that two different fname caches are concurrent. uuid = uuidutils.generate_uuid() backend = imagebackend.Backend(False) wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = eventlet.event.Event() thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname2', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) # Thread 1 should run before thread 2. sig1.wait() wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname1', None, signal=sig2, wait=wait2, done=done2) eventlet.sleep(0) # Wait for thread 2 to start. sig2.wait() wait2.send() tries = 0 while not done2.ready() and tries < 10: eventlet.sleep(0) tries += 1 try: self.assertTrue(done2.ready()) finally: wait1.send() eventlet.sleep(0) # Wait on greenthreads to assert they didn't raise exceptions # during execution thr1.wait() thr2.wait() class FakeVolumeDriver(object): def __init__(self, *args, **kwargs): pass def attach_volume(self, *args): pass def detach_volume(self, *args): pass def get_xml(self, *args): return "" def get_config(self, *args): """Connect the volume to a fake device.""" conf = vconfig.LibvirtConfigGuestDisk() conf.source_type = "network" conf.source_protocol = "fake" conf.source_name = "fake" conf.target_dev = "fake" conf.target_bus = "fake" return conf def connect_volume(self, *args): """Connect the volume to a fake device.""" pass class FakeConfigGuestDisk(object): def __init__(self, *args, **kwargs): self.source_type = None self.driver_cache = None class FakeConfigGuest(object): def __init__(self, *args, **kwargs): self.driver_cache = None class FakeNodeDevice(object): def __init__(self, fakexml): self.xml = fakexml def XMLDesc(self, flags): return self.xml def _create_test_instance(): flavor = objects.Flavor(memory_mb=2048, swap=0, vcpu_weight=None, root_gb=1, id=2, name=u'm1.small', ephemeral_gb=0, rxtx_factor=1.0, flavorid=u'1', vcpus=1, extra_specs={}) return { 'id': 1, 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310', 'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', 'display_name': "Acme webserver", 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'root_gb': 10, 'ephemeral_gb': 20, 'instance_type_id': '5', # m1.small 'extra_specs': {}, 'system_metadata': { 'image_disk_format': 'raw', }, 'flavor': flavor, 'new_flavor': None, 'old_flavor': None, 'pci_devices': objects.PciDeviceList(), 'numa_topology': None, 'config_drive': None, 'vm_mode': None, 'kernel_id': None, 'ramdisk_id': None, 'os_type': 'linux', 'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb', 'ephemeral_key_uuid': None, 'vcpu_model': None, 'host': 'fake-host', 'task_state': None, } class LibvirtConnTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True _EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' % 
utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7]) def setUp(self): super(LibvirtConnTestCase, self).setUp() self.flags(fake_call=True) self.user_id = 'fake' self.project_id = 'fake' self.context = context.get_admin_context() temp_dir = self.useFixture(fixtures.TempDir()).path self.flags(instances_path=temp_dir) self.flags(snapshots_directory=temp_dir, group='libvirt') self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.flags(sysinfo_serial="hardware", group="libvirt") self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def fake_extend(image, size, use_cow=False): pass self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend) self.stubs.Set(imagebackend.Image, 'resolve_driver_format', imagebackend.Image._get_driver_format) self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.test_instance = _create_test_instance() self.test_image_meta = { "disk_format": "raw", } self.image_service = nova.tests.unit.image.fake.stub_out_image_service( self) self.device_xml_tmpl = """ 58a84f6d-3f0c-4e19-a0af-eb657b790657
""" def relpath(self, path): return os.path.relpath(path, CONF.instances_path) def tearDown(self): nova.tests.unit.image.fake.FakeImageService_reset() super(LibvirtConnTestCase, self).tearDown() def test_driver_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr.capabilities['has_imagecache'], 'Driver capabilities for \'has_imagecache\' ' 'is invalid') self.assertTrue(drvr.capabilities['supports_recreate'], 'Driver capabilities for \'supports_recreate\' ' 'is invalid') self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'], 'Driver capabilities for ' '\'supports_migrate_to_same_host\' is invalid') def create_fake_libvirt_mock(self, **kwargs): """Defining mocks for LibvirtDriver(libvirt is not used).""" # A fake libvirt.virConnect class FakeLibvirtDriver(object): def defineXML(self, xml): return FakeVirtDomain() # Creating mocks volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver' '.FakeVolumeDriver'] fake = FakeLibvirtDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake) self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers', lambda x: volume_driver) self.stubs.Set(host.Host, 'get_connection', lambda x: fake) def fake_lookup(self, instance_name): return FakeVirtDomain() def fake_execute(self, *args, **kwargs): open(args[-1], "a").close() def _create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 'dummy'), 'disabled': kwargs.get('disabled', False), 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0} return objects.Service(**service_ref) def _get_pause_flag(self, drvr, network_info, power_on=True, vifs_already_plugged=False): timeout = CONF.vif_plugging_timeout events = [] if (drvr._conn_supports_start_paused and utils.is_neutron() and not vifs_already_plugged and power_on and timeout): events = drvr._get_neutron_events(network_info) return bool(events) def test_public_api_signatures(self): baseinst = driver.ComputeDriver(None) inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertPublicAPISignatures(baseinst, inst) def test_legacy_block_device_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(drvr.need_legacy_block_device_info) @mock.patch.object(host.Host, "has_min_version") def test_min_version_start_ok(self, mock_version): mock_version.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") @mock.patch.object(host.Host, "has_min_version") def test_min_version_start_abort(self, mock_version): mock_version.return_value = False drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1) @mock.patch.object(libvirt_driver.LOG, 'warning') def test_next_min_version_deprecation_warning(self, mock_warning, mock_get_libversion): # Skip test if there's no currently planned new min version if (versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) == versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_VERSION)): self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION") # Test that a warning is logged if the libvirt version is less than # the next required minimum version. 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") # assert that the next min version is in a warning message expected_arg = {'version': versionutils.convert_version_to_str( versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))} version_arg_found = False for call in mock_warning.call_args_list: if call[0][1] == expected_arg: version_arg_found = True break self.assertTrue(version_arg_found) @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION)) @mock.patch.object(libvirt_driver.LOG, 'warning') def test_next_min_version_ok(self, mock_warning, mock_get_libversion): # Skip test if there's no currently planned new min version if (versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) == versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_VERSION)): self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION") # Test that a warning is not logged if the libvirt version is greater # than or equal to NEXT_MIN_LIBVIRT_VERSION. drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") # assert that the next min version is in a warning message expected_arg = {'version': versionutils.convert_version_to_str( versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))} version_arg_found = False for call in mock_warning.call_args_list: if call[0][1] == expected_arg: version_arg_found = True break self.assertFalse(version_arg_found) @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.PPC64)) - 1) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.PPC64))) @mock.patch.object(arch, "from_host", return_value=arch.PPC64) def test_min_version_ppc_old_libvirt(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.PPC64))) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.PPC64)) - 1) @mock.patch.object(arch, "from_host", return_value=arch.PPC64) def test_min_version_ppc_old_qemu(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.PPC64))) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.PPC64))) @mock.patch.object(arch, "from_host", return_value=arch.PPC64) def test_min_version_ppc_ok(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.S390X)) - 1) 
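# S390X has its own per-arch version floors (MIN_LIBVIRT_OTHER_ARCH / MIN_QEMU_OTHER_ARCH); the following cases exercise them in the same pattern as the PPC64 checks above.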
@mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_old_libvirt(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.S390X)) - 1) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_old_qemu(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_OTHER_ARCH.get( arch.S390X))) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_ok(self, mock_libv, mock_qemu, mock_arch): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") def _do_test_parse_migration_flags(self, lm_config=None, lm_expected=None, bm_config=None, bm_expected=None): if lm_config is not None: self.flags(live_migration_flag=lm_config, group='libvirt') if bm_config is not None: self.flags(block_migration_flag=bm_config, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr._parse_migration_flags() if lm_expected is not None: self.assertEqual(lm_expected, drvr._live_migration_flags) if bm_expected is not None: self.assertEqual(bm_expected, drvr._block_migration_flags) def test_parse_live_migration_flags_default(self): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED)) def test_parse_live_migration_flags(self): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE)) def test_parse_block_migration_flags_default(self): self._do_test_parse_migration_flags( bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) def test_parse_block_migration_flags(self): self._do_test_parse_migration_flags( bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'), 
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_live_migration_flag_with_invalid_flag(self, mock_log): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_FOO_BAR'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC, VIR_MIGRATE_FOO_BAR'), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn("unknown libvirt live migration flag", msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn("unknown libvirt live migration flag", msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_migration_flags_unsafe_block(self, mock_log): '''Test if the driver logs a warning if the live_migration_flag and/or block_migration_flag config option uses a value which can cause potential damage. ''' self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('Removing the VIR_MIGRATE_NON_SHARED_INC', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('Adding the VIR_MIGRATE_NON_SHARED_INC', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_migration_flags_p2p_missing(self, mock_log): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED'), bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('Adding the VIR_MIGRATE_PEER2PEER flag', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('Adding the 
VIR_MIGRATE_PEER2PEER flag', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_migration_flags_p2p_xen(self, mock_log): self.flags(virt_type='xen', group='libvirt') self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED'), bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_parse_migration_flags_config_mgmt(self, mock_log): self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_PERSIST_DEST, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED'), bm_config=('VIR_MIGRATE_PERSIST_DEST, ' 'VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_LIVE, ' 'VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0]) msg = mock_log.warning.call_args_list[2] self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0]) msg = mock_log.warning.call_args_list[3] self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_live_migration_tunnelled_true(self, mock_log): self.flags(live_migration_tunnelled=True, group='libvirt') self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE'), bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC | libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED)) msg = mock_log.warning.call_args_list[0] self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0]) @mock.patch('nova.virt.libvirt.driver.LOG') def test_live_migration_tunnelled_false(self, mock_log): 
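# With live_migration_tunnelled explicitly disabled, the driver is expected to strip VIR_MIGRATE_TUNNELLED from both the live and block migration flag sets and warn that the configured flags contained it.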
self.flags(live_migration_tunnelled=False, group='libvirt') self._do_test_parse_migration_flags( lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'), bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, ' 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC, ' 'VIR_MIGRATE_TUNNELLED'), lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE), bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) msg = mock_log.warning.call_args_list[0] self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0]) msg = mock_log.warning.call_args_list[1] self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0]) @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with("root", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) instance.os_type = "windows" mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with( "Administrator", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password_image(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes", "os_admin_user": "foo" }} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with("foo", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=False) def test_set_admin_password_bad_version(self, mock_svc, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.SetAdminPasswdNotSupported, drvr.set_admin_password, instance, 
"123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_set_admin_password_bad_hyp(self, mock_svc, mock_image): self.flags(virt_type='foo', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.SetAdminPasswdNotSupported, drvr.set_admin_password, instance, "123") @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_set_admin_password_guest_agent_not_running(self, mock_svc): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.QemuGuestAgentNotEnabled, drvr.set_admin_password, instance, "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password_error(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_guest.set_user_password.side_effect = ( fakelibvirt.libvirtError("error")) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.set_admin_password, instance, "123") @mock.patch.object(objects.Service, 'get_by_compute_host') def test_set_host_enabled_with_disable(self, mock_svc): # Tests disabling an enabled host. drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) svc = self._create_service(host='fake-mini') mock_svc.return_value = svc drvr._set_host_enabled(False) self.assertTrue(svc.disabled) @mock.patch.object(objects.Service, 'get_by_compute_host') def test_set_host_enabled_with_enable(self, mock_svc): # Tests enabling a disabled host. drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) svc = self._create_service(disabled=True, host='fake-mini') mock_svc.return_value = svc drvr._set_host_enabled(True) self.assertTrue(svc.disabled) @mock.patch.object(objects.Service, 'get_by_compute_host') def test_set_host_enabled_with_enable_state_enabled(self, mock_svc): # Tests enabling an enabled host. drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) svc = self._create_service(disabled=False, host='fake-mini') mock_svc.return_value = svc drvr._set_host_enabled(True) self.assertFalse(svc.disabled) @mock.patch.object(objects.Service, 'get_by_compute_host') def test_set_host_enabled_with_disable_state_disabled(self, mock_svc): # Tests disabling a disabled host. drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) svc = self._create_service(disabled=True, host='fake-mini') mock_svc.return_value = svc drvr._set_host_enabled(False) self.assertTrue(svc.disabled) def test_set_host_enabled_swallows_exceptions(self): # Tests that set_host_enabled will swallow exceptions coming from the # db_api code so they don't break anything calling it, e.g. the # _get_new_connection method. 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object(db, 'service_get_by_compute_host') as db_mock: # Make db.service_get_by_compute_host raise NovaException; this # is more robust than just raising ComputeHostNotFound. db_mock.side_effect = exception.NovaException drvr._set_host_enabled(False) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") def test_prepare_pci_device(self, mock_lookup): pci_devices = [dict(hypervisor_name='xxx')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) drvr._prepare_pci_devices_for_use(pci_devices) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") @mock.patch.object(fakelibvirt.virNodeDevice, "dettach") def test_prepare_pci_device_exception(self, mock_detach, mock_lookup): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) mock_detach.side_effect = fakelibvirt.libvirtError("xxxx") self.assertRaises(exception.PciDevicePrepareFailed, drvr._prepare_pci_devices_for_use, pci_devices) def test_detach_pci_devices_exception(self): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(host.Host, 'has_min_version') host.Host.has_min_version = lambda x, y: False self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, None, pci_devices) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc') def test_detach_pci_devices(self, mocked_get_xml_desc, *args): fake_domXML1_with_pci = ( """
""") fake_domXML1_without_pci = ( """
""") pci_device_info = {'compute_node_id': 1, 'instance_uuid': 'uuid', 'address': '0001:04:10.1'} pci_device = objects.PciDevice(**pci_device_info) pci_devices = [pci_device] mocked_get_xml_desc.return_value = fake_domXML1_without_pci drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain( drvr._get_connection(), fake_domXML1_with_pci, False) guest = libvirt_guest.Guest(dom) drvr._detach_pci_devices(guest, pci_devices) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc') def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, *args): fake_domXML1_with_pci = ( """
""") pci_device_info = {'compute_node_id': 1, 'instance_uuid': 'uuid', 'address': '0001:04:10.1'} pci_device = objects.PciDevice(**pci_device_info) pci_devices = [pci_device] mocked_get_xml_desc.return_value = fake_domXML1_with_pci drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain( drvr._get_connection(), fake_domXML1_with_pci, False) guest = libvirt_guest.Guest(dom) self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, guest, pci_devices) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector(self, fake_get_connector): initiator = 'fake.initiator.iqn' ip = 'fakeip' host = 'fakehost' wwpns = ['100010604b019419'] wwnns = ['200010604b019419'] self.flags(my_ip=ip) self.flags(host=host) expected = { 'ip': ip, 'initiator': initiator, 'host': host, 'wwpns': wwpns, 'wwnns': wwnns } volume = { 'id': 'fake' } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertThat(expected, matchers.DictMatches(result)) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector_storage_ip(self, fake_get_connector): ip = '100.100.100.100' storage_ip = '101.101.101.101' self.flags(my_block_storage_ip=storage_ip, my_ip=ip) volume = { 'id': 'fake' } expected = { 'ip': storage_ip } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertEqual(storage_ip, result['ip']) def test_lifecycle_event_registration(self): calls = [] def fake_registerErrorHandler(*args, **kwargs): calls.append('fake_registerErrorHandler') def fake_get_host_capabilities(**args): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu calls.append('fake_get_host_capabilities') return caps @mock.patch.object(fakelibvirt, 'registerErrorHandler', side_effect=fake_registerErrorHandler) @mock.patch.object(host.Host, "get_capabilities", side_effect=fake_get_host_capabilities) def test_init_host(get_host_capabilities, register_error_handler): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("test_host") test_init_host() # NOTE(dkliban): Will fail if get_host_capabilities is called before # registerErrorHandler self.assertEqual(['fake_registerErrorHandler', 'fake_get_host_capabilities'], calls) def test_sanitize_log_to_xml(self): # setup fake data data = {'auth_password': 'scrubme'} bdm = [{'connection_info': {'data': data}}] bdi = {'block_device_mapping': bdm} # Tests that the parameters to the _get_guest_xml method # are sanitized for passwords when logged. 
def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: self.assertNotIn('scrubme', args[0]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = mock.Mock() with test.nested( mock.patch.object(libvirt_driver.LOG, 'debug', side_effect=fake_debug), mock.patch.object(drvr, '_get_guest_config', return_value=conf) ) as ( debug_mock, conf_mock ): drvr._get_guest_xml(self.context, self.test_instance, network_info={}, disk_info={}, image_meta={}, block_device_info=bdi) # we don't care what the log message is, we just want to make sure # our stub method is called which asserts the password is scrubbed self.assertTrue(debug_mock.called) @mock.patch.object(time, "time") def test_get_guest_config(self, time_mock): time_mock.return_value = 1234567.89 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) test_instance = copy.deepcopy(self.test_instance) test_instance["display_name"] = "purple tomatoes" ctxt = context.RequestContext(project_id=123, project_name="aubergine", user_id=456, user_name="pie") flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs={}) instance_ref = objects.Instance(**test_instance) instance_ref.flavor = flavor image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info, context=ctxt) self.assertEqual(cfg.uuid, instance_ref["uuid"]) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, 6 * units.Ki) self.assertEqual(cfg.vcpus, 28) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(len(cfg.metadata), 1) self.assertIsInstance(cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance) self.assertEqual(version.version_string_with_package(), cfg.metadata[0].package) self.assertEqual("purple tomatoes", cfg.metadata[0].name) self.assertEqual(1234567.89, cfg.metadata[0].creationTime) self.assertEqual("image", cfg.metadata[0].roottype) self.assertEqual(str(instance_ref["image_ref"]), cfg.metadata[0].rootid) self.assertIsInstance(cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner) self.assertEqual(456, cfg.metadata[0].owner.userid) self.assertEqual("pie", cfg.metadata[0].owner.username) self.assertEqual(123, cfg.metadata[0].owner.projectid) self.assertEqual("aubergine", cfg.metadata[0].owner.projectname) self.assertIsInstance(cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor) 
self.assertEqual("m1.small", cfg.metadata[0].flavor.name) self.assertEqual(6, cfg.metadata[0].flavor.memory) self.assertEqual(28, cfg.metadata[0].flavor.vcpus) self.assertEqual(496, cfg.metadata[0].flavor.disk) self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral) self.assertEqual(33550336, cfg.metadata[0].flavor.swap) def test_get_guest_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_get_guest_config_lxc_with_id_maps(self): self.flags(virt_type='lxc', group='libvirt') self.flags(uid_maps=['0:1000:100'], group='libvirt') self.flags(gid_maps=['0:1000:100'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) self.assertEqual(len(cfg.idmaps), 2) self.assertIsInstance(cfg.idmaps[0], vconfig.LibvirtConfigGuestUIDMap) self.assertIsInstance(cfg.idmaps[1], vconfig.LibvirtConfigGuestGIDMap) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_fits(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) 
self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice') ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def _test_get_guest_memory_backing_config( self, host_topology, inst_topology, numatune): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object( drvr, "_get_host_numa_topology", return_value=host_topology): return drvr._get_guest_memory_backing_config( inst_topology, numatune, {}) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_large_success(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertEqual(1, len(result.hugepages)) self.assertEqual(2048, result.hugepages[0].size_kb) self.assertEqual([0], result.hugepages[0].nodeset) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_smallest(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertIsNone(result) def test_get_guest_memory_backing_config_realtime(self): flavor = {"extra_specs": 
{ "hw:cpu_realtime": "yes", "hw:cpu_policy": "dedicated" }} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) membacking = drvr._get_guest_memory_backing_config( None, None, flavor) self.assertTrue(membacking.locked) self.assertFalse(membacking.sharedpages) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_pci_no_numa_info( self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=None) pci_device = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device])): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=1) pci_device = objects.PciDevice(**pci_device_info) pci_device_info.update(numa_node=0, address='0000:00:00.2') pci_device2 = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice'), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device, pci_device2]) ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock, pci_mock): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) 
self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') @mock.patch.object(host.Host, 'get_capabilities') @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled') def _test_get_guest_config_numa_unsupported(self, fake_lib_version, fake_version, fake_type, fake_arch, exception_class, pagesize, mock_host, mock_caps, mock_lib_version, mock_version, mock_type): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=pagesize)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = fake_arch caps.host.topology = self._fake_caps_numa_topology() mock_type.return_value = fake_type mock_version.return_value = fake_version mock_lib_version.return_value = fake_lib_version mock_caps.return_value = caps drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(exception_class, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_numa_old_version_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_old_version_libvirt_ppc(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION_PPC) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.PPC64LE, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_bad_version_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) @mock.patch.object(libvirt_driver.LOG, 'warn') def test_has_numa_support_bad_version_libvirt_log(self, mock_warn): # Tests that a warning is logged once and only once when there is a bad # BAD_LIBVIRT_NUMA_VERSIONS detected. 
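# The driver remembers that it already warned via the _bad_libvirt_numa_version_warn attribute, so calling _has_numa_support() twice below must still produce exactly one warning.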
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn')) with mock.patch.object(drvr._host, 'has_version', return_value=True): for i in range(2): self.assertFalse(drvr._has_numa_support()) self.assertTrue(drvr._bad_libvirt_numa_version_warn) self.assertEqual(1, mock_warn.call_count) # assert the version is logged properly self.assertEqual('1.2.9.2', mock_warn.call_args[0][1]) def test_get_guest_config_numa_old_version_qemu(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1, host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_other_arch_qemu(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.S390, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_xen(self): self.flags(virt_type='xen', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int((4, 5, 0)), 'XEN', arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_old_pages_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.MemoryPagesUnsupported, 2048) def test_get_guest_config_numa_old_pages_qemu(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1, host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, 2048) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset( self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))) ) as (has_min_version_mock, get_host_cap_mock, get_vcpu_pin_set_mock, get_online_cpus_mock): cfg = 
drvr._get_guest_config(instance_ref, [], image_meta, disk_info) # NOTE(ndipanov): we make sure that pin_set was taken into account # when choosing viable cells self.assertEqual(set([2, 3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_non_numa_host_instance_topo(self, is_able): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([2]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.numatune) self.assertIsNotNone(cfg.cpu.numa) for instance_cell, numa_cfg_cell in zip( instance_topology.cells, cfg.cpu.numa.cells): self.assertEqual(instance_cell.id, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_topo(self, is_able): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=None), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=None)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) 
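# With vcpu_pin_set limited to host CPUs 2-5, guest vCPUs 0-1 should float over host CPUs 2-3, vCPUs 2-3 over host CPUs 4-5, and the emulator thread over their union.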
self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_reordered(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset) for index, (instance_cell, 
numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for index, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, cpu_pinning={0: 24, 1: 25}), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024, cpu_pinning={2: 0, 3: 1})]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology( sockets_per_cell=4, cores_per_socket=3, threads_per_core=2) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 24, 25]), cfg.cputune.emulatorpin.cpuset) for i, (instance_cell, numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(i, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for i, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): 
self.assertEqual(i, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_mempages_shared(self): instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertEqual("shared", numa_cfg_cell.memAccess) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) self.assertEqual(0, len(cfg.cputune.vcpusched)) self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset) def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self): instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={ "hw:cpu_realtime": "yes", "hw:cpu_policy": "dedicated", "hw:cpu_realtime_mask": "^0-1" }) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( 
mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertEqual("shared", numa_cfg_cell.memAccess) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) self.assertEqual(1, len(cfg.cputune.vcpusched)) self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler) self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus) self.assertEqual(set([0, 1]), cfg.cputune.emulatorpin.cpuset) def test_get_cpu_numa_config_from_instance(self): topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128), objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128), ]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = drvr._get_cpu_numa_config_from_instance(topology, True) self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA) self.assertEqual(0, conf.cells[0].id) self.assertEqual(set([1, 2]), conf.cells[0].cpus) self.assertEqual(131072, conf.cells[0].memory) self.assertEqual("shared", conf.cells[0].memAccess) self.assertEqual(1, conf.cells[1].id) self.assertEqual(set([3, 4]), conf.cells[1].cpus) self.assertEqual(131072, conf.cells[1].memory) self.assertEqual("shared", conf.cells[1].memAccess) def test_get_cpu_numa_config_from_instance_none(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = drvr._get_cpu_numa_config_from_instance(None, False) self.assertIsNone(conf) @mock.patch.object(host.Host, 'has_version', return_value=True) def test_has_cpu_policy_support(self, mock_has_version): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.CPUPinningNotSupported, drvr._has_cpu_policy_support) @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support", return_value=True) @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support", return_value=True) @mock.patch.object(host.Host, "get_capabilities") def test_does_not_want_hugepages(self, mock_caps, mock_numa, mock_hp): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=4), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=4)]) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() 
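        # Descriptive note (added): the instance cells above request
        # pagesize=4, i.e. ordinary small pages, so _wants_hugepages() must
        # be False for every host/instance topology combination checked
        # below.  (For comparison, a flavor would normally request huge
        # pages with something like extra_specs={'hw:mem_page_size': '2048'};
        # that spec name is cited here for illustration only.)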
mock_caps.return_value = caps host_topology = drvr._get_host_numa_topology() self.assertFalse(drvr._wants_hugepages(None, None)) self.assertFalse(drvr._wants_hugepages(host_topology, None)) self.assertFalse(drvr._wants_hugepages(None, instance_topology)) self.assertFalse(drvr._wants_hugepages(host_topology, instance_topology)) @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support", return_value=True) @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support", return_value=True) @mock.patch.object(host.Host, "get_capabilities") def test_does_want_hugepages(self, mock_caps, mock_numa, mock_hp): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() mock_caps.return_value = caps host_topology = drvr._get_host_numa_topology() self.assertTrue(drvr._wants_hugepages(host_topology, instance_topology)) def test_get_guest_config_clock(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) hpet_map = { arch.X86_64: True, arch.I686: True, arch.PPC: False, arch.PPC64: False, arch.ARMV7: False, arch.AARCH64: False, } for guestarch, expect_hpet in hpet_map.items(): with mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=guestarch): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "utc") self.assertIsInstance(cfg.clock.timers[0], vconfig.LibvirtConfigGuestTimer) self.assertIsInstance(cfg.clock.timers[1], vconfig.LibvirtConfigGuestTimer) self.assertEqual(cfg.clock.timers[0].name, "pit") self.assertEqual(cfg.clock.timers[0].tickpolicy, "delay") self.assertEqual(cfg.clock.timers[1].name, "rtc") self.assertEqual(cfg.clock.timers[1].tickpolicy, "catchup") if expect_hpet: self.assertEqual(3, len(cfg.clock.timers)) self.assertIsInstance(cfg.clock.timers[2], vconfig.LibvirtConfigGuestTimer) self.assertEqual('hpet', cfg.clock.timers[2].name) self.assertFalse(cfg.clock.timers[2].present) else: self.assertEqual(2, len(cfg.clock.timers)) @mock.patch.object(libvirt_utils, 'get_arch') @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_config_windows(self, mock_version, mock_get_arch): mock_version.return_value = False mock_get_arch.return_value = arch.I686 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['os_type'] = 'windows' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "localtime") self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers) self.assertEqual("pit", 
cfg.clock.timers[0].name) self.assertEqual("rtc", cfg.clock.timers[1].name) self.assertEqual("hpet", cfg.clock.timers[2].name) self.assertFalse(cfg.clock.timers[2].present) @mock.patch.object(libvirt_utils, 'get_arch') @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_config_windows_timer(self, mock_version, mock_get_arch): mock_version.return_value = True mock_get_arch.return_value = arch.I686 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['os_type'] = 'windows' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "localtime") self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers) self.assertEqual("pit", cfg.clock.timers[0].name) self.assertEqual("rtc", cfg.clock.timers[1].name) self.assertEqual("hpet", cfg.clock.timers[2].name) self.assertFalse(cfg.clock.timers[2].present) self.assertEqual("hypervclock", cfg.clock.timers[3].name) self.assertTrue(cfg.clock.timers[3].present) self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_config_windows_hyperv_feature1(self, mock_version): def fake_version(lv_ver=None, hv_ver=None, hv_type=None): if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0): return True return False mock_version.side_effect = fake_version drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['os_type'] = 'windows' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "localtime") self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) self.assertTrue(cfg.features[2].relaxed) self.assertFalse(cfg.features[2].spinlocks) self.assertFalse(cfg.features[2].vapic) @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_config_windows_hyperv_feature2(self, mock_version): mock_version.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['os_type'] = 'windows' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "localtime") self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) 
self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) self.assertTrue(cfg.features[2].relaxed) self.assertTrue(cfg.features[2].spinlocks) self.assertEqual(8191, cfg.features[2].spinlock_retries) self.assertTrue(cfg.features[2].vapic) def test_get_guest_config_with_two_nics(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 2), image_meta, disk_info) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, 2 * units.Mi) self.assertEqual(cfg.vcpus, 1) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) def test_get_guest_config_bug_1118829(self): self.flags(virt_type='uml', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) disk_info = {'disk_bus': 'virtio', 'cdrom_bus': 'ide', 'mapping': {u'vda': {'bus': 'virtio', 'type': 'disk', 'dev': u'vda'}, 'root': {'bus': 'virtio', 'type': 'disk', 'dev': 'vda'}}} # NOTE(jdg): For this specific test leave this blank # This will exercise the failed code path still, # and won't require fakes and stubs of the iscsi discovery block_device_info = {} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, block_device_info) self.assertEqual(instance_ref['root_device_name'], '/dev/vda') def test_get_guest_config_with_root_device_name(self): self.flags(virt_type='uml', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = {'root_device_name': '/dev/vdb'} disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, block_device_info) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, block_device_info) self.assertEqual(0, len(cfg.features)) self.assertEqual(cfg.memory, 2 * units.Mi) self.assertEqual(cfg.vcpus, 1) self.assertEqual(cfg.os_type, "uml") self.assertEqual(cfg.os_boot_dev, []) self.assertEqual(cfg.os_root, '/dev/vdb') self.assertEqual(len(cfg.devices), 3) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) 
self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_has_uefi_support_with_invalid_version(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object(drvr._host, 'has_min_version', return_value=False): self.assertFalse(drvr._has_uefi_support()) def test_has_uefi_support_not_supported_arch(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "alpha" self.assertFalse(drvr._has_uefi_support()) @mock.patch('os.path.exists', return_value=False) def test_has_uefi_support_with_no_loader_existed(self, mock_exist): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(drvr._has_uefi_support()) @mock.patch('os.path.exists', return_value=True) def test_has_uefi_support(self, mock_has_version): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" with mock.patch.object(drvr._host, 'has_min_version', return_value=True): self.assertTrue(drvr._has_uefi_support()) def test_get_guest_config_with_uefi(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_firmware_type": "uefi"}}) instance_ref = objects.Instance(**self.test_instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(drvr, "_has_uefi_support", return_value=True)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_loader_type, "pflash") def test_get_guest_config_with_block_device(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdc'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdd'}), ] ) info = {'block_device_mapping': driver_block_device.convert_volumes( bdms )} info['block_device_mapping'][0]['connection_info'] = conn_info info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, info) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'vdc') self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[3].target_dev, 'vdd') mock_save.assert_called_with() def test_get_guest_config_lxc_with_attached_volume(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = 
            {'driver_volume_type': 'fake'}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'boot_index': 0}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     }),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 3,
                     'source_type': 'volume', 'destination_type': 'volume',
                     }),
            ]
        )
        info = {'block_device_mapping': driver_block_device.convert_volumes(
            bdms
        )}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        info['block_device_mapping'][2]['connection_info'] = conn_info
        info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
        info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
        info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'

        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                info)
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, None, info)
            self.assertIsInstance(cfg.devices[1],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[1].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdd')
            mock_save.assert_called_with()

    def test_get_guest_config_with_configdrive(self):
        # It is necessary to check whether the architecture is Power,
        # because Power has no IDE support, so libvirt translates all IDE
        # requests to SCSI.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # make configdrive.required_by() return True
        instance_ref['config_drive'] = True

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)

        # The last device is selected for the config drive. On x86 that is
        # the last IDE device (hdd). Since Power only supports SCSI, the
        # last device there is sdz.
        expect = {"ppc": "sdz", "ppc64": "sdz"}
        disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, disk)

    def test_get_guest_config_with_virtio_scsi_bus(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            [])
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestController)
        self.assertEqual(cfg.devices[2].model, 'virtio-scsi')

    def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        conn_info = {'driver_volume_type': 'fake'}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
            ]
        )
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes(bdms)}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()

    def test_get_guest_config_with_vnc(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False, group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "vnc") def test_get_guest_config_with_vnc_and_tablet(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") def test_get_guest_config_with_spice_and_tablet(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "spice") def test_get_guest_config_with_spice_and_agent(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], 
vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[5].type, "spice") self.assertEqual(cfg.devices[6].type, "qxl") @mock.patch('nova.console.serial.acquire_port') @mock.patch('nova.virt.hardware.get_number_of_serial_ports', return_value=1) @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',) def test_create_serial_console_devices_based_on_arch(self, mock_get_arch, mock_get_port_number, mock_acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial, arch.S390: vconfig.LibvirtConfigGuestConsole, arch.S390X: vconfig.LibvirtConfigGuestConsole} for guest_arch, device_type in expected.items(): mock_get_arch.return_value = guest_arch guest = vconfig.LibvirtConfigGuest() drvr._create_serial_console_devices(guest, instance=None, flavor={}, image_meta={}) self.assertEqual(1, len(guest.devices)) console_device = guest.devices[0] self.assertIsInstance(console_device, device_type) self.assertEqual("tcp", console_device.type) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console(self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.return_value = 11111 cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(8, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual(11111, cfg.devices[2].listen_port) def test_get_guest_config_serial_console_through_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], 
vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual("tcp", cfg.devices[3].type) self.assertEqual("tcp", cfg.devices[4].type) def test_get_guest_config_serial_console_invalid_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises( exception.ImageSerialPortNumberInvalid, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_serial_console_image_and_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_serial_port_count": "3"}}) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4} disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10, len(cfg.devices), cfg.devices) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual("tcp", cfg.devices[3].type) self.assertEqual("tcp", cfg.devices[4].type) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console_through_port_rng_exhausted( self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.side_effect = exception.SocketPortRangeExhaustedException( '127.0.0.1') self.assertRaises( exception.SocketPortRangeExhaustedException, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch('nova.console.serial.release_port') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') @mock.patch.object(host.Host, 'get_guest') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_serial_ports_from_guest') def 
test_serial_console_release_port(
            self, mock_get_serial_ports_from_guest, mock_get_guest,
            mock_get_info, mock_release_port):
        self.flags(enabled="True", group='serial_console')

        guest = libvirt_guest.Guest(FakeVirtDomain())
        guest.power_off = mock.Mock()
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)
        mock_get_guest.return_value = guest
        mock_get_serial_ports_from_guest.return_value = iter([
            ('127.0.0.1', 10000), ('127.0.0.1', 10001)])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._destroy(objects.Instance(**self.test_instance))

        mock_release_port.assert_has_calls(
            [mock.call(host='127.0.0.1', port=10000),
             mock.call(host='127.0.0.1', port=10001)])

    @mock.patch('os.path.getsize', return_value=0)  # size doesn't matter
    @mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size',
                return_value='fake-size')
    def test_detach_encrypted_volumes(self, mock_getsize,
                                      mock_get_volume_size):
        """Test that unencrypted volumes are not disconnected with
        dmcrypt.
        """
        instance = objects.Instance(**self.test_instance)
        xml = """ """
        dom = FakeVirtDomain(fake_xml=xml)
        instance.ephemeral_key_uuid = 'fake-id'  # encrypted

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        @mock.patch.object(dmcrypt, 'delete_volume')
        @mock.patch.object(conn._host, 'get_domain', return_value=dom)
        def detach_encrypted_volumes(block_device_info, mock_get_domain,
                                     mock_delete_volume):
            conn._detach_encrypted_volumes(instance, block_device_info)

            mock_get_domain.assert_called_once_with(instance)
            self.assertFalse(mock_delete_volume.called)

        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}

        detach_encrypted_volumes(block_device_info)

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest(None, mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.1', 101),
            ('127.0.0.2', 100),
            ('127.0.0.2', 101)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest('bind', mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 101),
            ('127.0.0.2', 100)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_connect_only(self,
                                                      mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest('connect',
                                                   mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.2', 101)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest(None, mock_get_xml_desc,
                                                   'console')
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.1', 101),
            ('127.0.0.2', 100),
            ('127.0.0.2', 101)], list(i))

    def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
                                          dev_name='serial'):
        # NOTE: the <source .../> children below are reconstructed to match
        # the (host, port, mode) pairs asserted by the tests above.
        xml = """
        <domain type='kvm'>
          <devices>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </%(dev_name)s>
          </devices>
        </domain>""" % {'dev_name': dev_name}

        mock_get_xml_desc.return_value = xml

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        return drvr._get_serial_ports_from_guest(guest, mode=mode)

    def test_get_guest_config_with_type_xen(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False, group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(),
True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 6) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[3].type, "vnc") self.assertEqual(cfg.devices[4].type, "xen") @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=arch.S390X) def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') self._stub_host_capabilities_cpu_arch(arch.S390X) instance_ref = objects.Instance(**self.test_instance) cfg = self._get_guest_config_via_fake_api(instance_ref) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) log_file_device = cfg.devices[2] self.assertIsInstance(log_file_device, vconfig.LibvirtConfigGuestConsole) self.assertEqual("sclplm", log_file_device.target_type) self.assertEqual("file", log_file_device.type) terminal_device = cfg.devices[3] self.assertIsInstance(terminal_device, vconfig.LibvirtConfigGuestConsole) self.assertEqual("sclp", terminal_device.target_type) self.assertEqual("pty", terminal_device.type) self.assertEqual("s390-ccw-virtio", cfg.os_mach_type) def _stub_host_capabilities_cpu_arch(self, cpu_arch): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = cpu_arch caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) def _get_guest_config_via_fake_api(self, instance): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) return drvr._get_guest_config(instance, [], image_meta, disk_info) def test_get_guest_config_with_type_xen_pae_hvm(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['vm_mode'] = vm_mode.HVM image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path) self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureAPIC) def test_get_guest_config_with_type_xen_pae_pvm(self): 
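        # Descriptive note (added): unlike the HVM variant above, no vm_mode
        # is set here, so the xen virt_type builds a paravirtualised guest
        # (os_type XEN) whose only advertised feature is PAE.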
self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_type, vm_mode.XEN) self.assertEqual(1, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) def test_get_guest_config_with_vnc_and_spice(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[6].type, "vnc") self.assertEqual(cfg.devices[7].type, "spice") def test_get_guest_config_with_watchdog_action_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "none"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type, agent_enabled=False): self.flags(enabled=vnc_enabled, group='vnc') 
self.flags(enabled=spice_enabled, agent_enabled=agent_enabled, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) return drvr._get_guest_usb_tablet(os_type) def test_get_guest_usb_tablet_wipe(self): self.flags(use_usb_tablet=True, group='libvirt') tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM) self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet(True, True, "foo") self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet( False, True, vm_mode.HVM, True) self.assertIsNone(tablet) def _test_get_guest_config_with_watchdog_action_flavor(self, hw_watchdog_action="hw:watchdog_action"): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def test_get_guest_config_with_watchdog_action_through_flavor(self): self._test_get_guest_config_with_watchdog_action_flavor() # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action' # should be removed in the next release def test_get_guest_config_with_watchdog_action_through_flavor_no_scope( self): self._test_get_guest_config_with_watchdog_action_flavor( hw_watchdog_action="hw_watchdog_action") def test_get_guest_config_with_watchdog_overrides_flavor(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "pause"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], 
vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("pause", cfg.devices[7].action) def test_get_guest_config_with_video_driver_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "vmvga"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[5].type, "vnc") self.assertEqual(cfg.devices[6].type, "vmvga") def test_get_guest_config_with_qga_through_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_qemu_guest_agent": "yes"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") self.assertEqual(cfg.devices[7].type, "unix") self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0") def test_get_guest_config_with_video_driver_vram(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) 
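        # Descriptive note (added, assuming the usual libvirt convention):
        # hw_video_ram is given in MiB in the image properties while the
        # generated video device stores vram in KiB, hence the
        # 64 * units.Mi / units.Ki value asserted below.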
self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[5].type, "spice") self.assertEqual(cfg.devices[6].type, "qxl") self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki) @mock.patch('nova.virt.disk.api.teardown_container') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') @mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_unmount_fs_if_error_during_lxc_create_domain(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_get_info, mock_teardown): """If we hit an error during a `_create_domain` call to `libvirt+lxc` we need to ensure the guest FS is unmounted from the host so that any future `lvremove` calls will work. """ self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_get_info.side_effect = exception.InstanceNotFound( instance_id='foo') drvr._conn.defineXML = mock.Mock() drvr._conn.defineXML.side_effect = ValueError('somethingbad') with test.nested( mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr, 'firewall_driver'), mock.patch.object(drvr, 'cleanup')): self.assertRaises(ValueError, drvr._create_domain_and_network, self.context, 'xml', mock_instance, None, None) mock_teardown.assert_called_with(container_dir='/tmp/rootfs') def test_video_driver_flavor_limit_not_set(self): self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with mock.patch.object(objects.Instance, 'save'): self.assertRaises(exception.RequestedVRamTooHigh, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_video_driver_ram_above_flavor_limit(self): self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') instance_ref = objects.Instance(**self.test_instance) instance_type = instance_ref.get_flavor() instance_type.extra_specs = {'hw_video:ram_max_mb': "50"} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = 
blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with mock.patch.object(objects.Instance, 'save'): self.assertRaises(exception.RequestedVRamTooHigh, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_without_qga_through_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_qemu_guest_agent": "no"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") def test_get_guest_config_with_rng_device(self): self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertIsNone(cfg.devices[6].backend) self.assertIsNone(cfg.devices[6].rate_bytes) self.assertIsNone(cfg.devices[6].rate_period) def test_get_guest_config_with_rng_not_allowed(self): self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 7) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) 
self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigMemoryBalloon) def test_get_guest_config_with_rng_limits(self): self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True', 'hw_rng:rate_bytes': '1024', 'hw_rng:rate_period': '2'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertIsNone(cfg.devices[6].backend) self.assertEqual(cfg.devices[6].rate_bytes, 1024) self.assertEqual(cfg.devices[6].rate_period, 2) @mock.patch('nova.virt.libvirt.driver.os.path.exists') def test_get_guest_config_with_rng_backend(self, mock_path): self.flags(virt_type='kvm', use_usb_tablet=False, rng_dev_path='/dev/hw_rng', group='libvirt') mock_path.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng') self.assertIsNone(cfg.devices[6].rate_bytes) self.assertIsNone(cfg.devices[6].rate_period) @mock.patch('nova.virt.libvirt.driver.os.path.exists') def test_get_guest_config_with_rng_dev_not_present(self, mock_path): self.flags(virt_type='kvm', use_usb_tablet=False, rng_dev_path='/dev/hw_rng', group='libvirt') mock_path.return_value = False drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = 
objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(exception.RngDeviceNotExist, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_guest_cpu_shares_with_multi_vcpu(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.vcpus = 4 image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(4096, cfg.cputune.shares) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_with_cpu_quota(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000', 'quota:cpu_period': '20000'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10000, cfg.cputune.shares) self.assertEqual(20000, cfg.cputune.period) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_with_bogus_cpu_quota(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood', 'quota:cpu_period': '20000'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(ValueError, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=False) def test_get_update_guest_cputune(self, is_able): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000', 'quota:cpu_period': '20000'} self.assertRaises( exception.UnsupportedHostCPUControlPolicy, drvr._update_guest_cputune, {}, instance_ref.flavor, "kvm") def _test_get_guest_config_sysinfo_serial(self, expected_serial): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) cfg = drvr._get_guest_config_sysinfo(instance_ref) self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo) self.assertEqual(version.vendor_string(), cfg.system_manufacturer) self.assertEqual(version.product_string(), cfg.system_product) self.assertEqual(version.version_string_with_package(), cfg.system_version) self.assertEqual(expected_serial, cfg.system_serial) self.assertEqual(instance_ref['uuid'], cfg.system_uuid) self.assertEqual("Virtual Machine", cfg.system_family) def test_get_guest_config_sysinfo_serial_none(self): 
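        """sysinfo_serial="none" should leave the guest SMBIOS serial unset."""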
self.flags(sysinfo_serial="none", group="libvirt") self._test_get_guest_config_sysinfo_serial(None) @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_sysinfo_serial_hardware") def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid): self.flags(sysinfo_serial="hardware", group="libvirt") theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" mock_uuid.return_value = theuuid self._test_get_guest_config_sysinfo_serial(theuuid) @contextlib.contextmanager def patch_exists(self, result): real_exists = os.path.exists def fake_exists(filename): if filename == "/etc/machine-id": return result return real_exists(filename) with mock.patch.object(os.path, "exists") as mock_exists: mock_exists.side_effect = fake_exists yield mock_exists def test_get_guest_config_sysinfo_serial_os(self): self.flags(sysinfo_serial="os", group="libvirt") theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" with test.nested( mock.patch.object(six.moves.builtins, "open", mock.mock_open(read_data=theuuid)), self.patch_exists(True)): self._test_get_guest_config_sysinfo_serial(theuuid) def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self): self.flags(sysinfo_serial="os", group="libvirt") with test.nested( mock.patch.object(six.moves.builtins, "open", mock.mock_open(read_data="")), self.patch_exists(True)): self.assertRaises(exception.NovaException, self._test_get_guest_config_sysinfo_serial, None) def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self): self.flags(sysinfo_serial="os", group="libvirt") with self.patch_exists(False): self.assertRaises(exception.NovaException, self._test_get_guest_config_sysinfo_serial, None) def test_get_guest_config_sysinfo_serial_auto_hardware(self): self.flags(sysinfo_serial="auto", group="libvirt") real_exists = os.path.exists with test.nested( mock.patch.object(os.path, "exists"), mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_sysinfo_serial_hardware") ) as (mock_exists, mock_uuid): def fake_exists(filename): if filename == "/etc/machine-id": return False return real_exists(filename) mock_exists.side_effect = fake_exists theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" mock_uuid.return_value = theuuid self._test_get_guest_config_sysinfo_serial(theuuid) def test_get_guest_config_sysinfo_serial_auto_os(self): self.flags(sysinfo_serial="auto", group="libvirt") real_exists = os.path.exists real_open = builtins.open with test.nested( mock.patch.object(os.path, "exists"), mock.patch.object(builtins, "open"), ) as (mock_exists, mock_open): def fake_exists(filename): if filename == "/etc/machine-id": return True return real_exists(filename) mock_exists.side_effect = fake_exists theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" def fake_open(filename, *args, **kwargs): if filename == "/etc/machine-id": h = mock.MagicMock() h.read.return_value = theuuid h.__enter__.return_value = h return h return real_open(filename, *args, **kwargs) mock_open.side_effect = fake_open self._test_get_guest_config_sysinfo_serial(theuuid) def _create_fake_service_compute(self): service_info = { 'id': 1729, 'host': 'fake', 'report_count': 0 } service_ref = objects.Service(**service_info) compute_info = { 'id': 1729, 'vcpus': 2, 'memory_mb': 1024, 'local_gb': 2048, 'vcpus_used': 0, 'memory_mb_used': 0, 'local_gb_used': 0, 'free_ram_mb': 1024, 'free_disk_gb': 2048, 'hypervisor_type': 'xen', 'hypervisor_version': 1, 'running_vms': 0, 'cpu_info': '', 'current_workload': 0, 'service_id': service_ref['id'], 'host': service_ref['host'] } compute_ref = 
objects.ComputeNode(**compute_info) return (service_ref, compute_ref) def test_get_guest_config_with_pci_passthrough_kvm(self): self.flags(virt_type='kvm', group='libvirt') service_ref, compute_ref = self._create_fake_service_compute() instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.ALLOCATED, address='0000:00:00.1', compute_id=compute_ref['id'], instance_uuid=instance.uuid, request_id=None, extra_info={}) pci_device = objects.PciDevice(**pci_device_info) pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'yes') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") self.assertEqual(dev.function, "1") self.assertEqual(had_pci, 1) def test_get_guest_config_with_pci_passthrough_xen(self): self.flags(virt_type='xen', group='libvirt') service_ref, compute_ref = self._create_fake_service_compute() instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.ALLOCATED, address='0000:00:00.2', compute_id=compute_ref['id'], instance_uuid=instance.uuid, request_id=None, extra_info={}) pci_device = objects.PciDevice(**pci_device_info) pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'no') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") self.assertEqual(dev.function, "2") self.assertEqual(had_pci, 1) def test_get_guest_config_os_command_line_through_image_meta(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_cmdline, "fake_os_command_line") def test_get_guest_config_os_command_line_without_kernel_id(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), 
True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(cfg.os_cmdline) def test_get_guest_config_os_command_empty(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": ""}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by # default, so testing an empty string and None value in the # os_command_line image property must pass cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertNotEqual(cfg.os_cmdline, "") def test_get_guest_config_armv7(self): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "vexpress-a15") def test_get_guest_config_aarch64(self): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.AARCH64 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "virt") def test_get_guest_config_machine_type_s390(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigGuestCPU() image_meta = objects.ImageMeta.from_dict(self.test_image_meta) host_cpu_archs = (arch.S390, arch.S390X) for host_cpu_arch in host_cpu_archs: caps.host.cpu.arch = host_cpu_arch os_mach_type = drvr._get_machine_type(image_meta, caps) self.assertEqual('s390-ccw-virtio', os_mach_type) def test_get_guest_config_machine_type_through_image_meta(self): self.flags(virt_type="kvm", group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": 
{"hw_machine_type": "fake_machine_type"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") def test_get_guest_config_machine_type_from_config(self): self.flags(virt_type='kvm', group='libvirt') self.flags(hw_machine_type=['x86_64=fake_machine_type'], group='libvirt') def fake_getCapabilities(): return """ cef19ce0-0ca2-11df-855d-b19fbce37686 x86_64 Penryn Intel """ def fake_baselineCPU(cpu, flag): return """ Penryn Intel """ # Make sure the host arch is mocked as x86_64 self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities, baselineCPU=fake_baselineCPU, getVersion=lambda: 1005001) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") def _test_get_guest_config_ppc64(self, device_index): """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config. """ self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) expected = (arch.PPC64, arch.PPC) for guestarch in expected: with mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=guestarch): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.devices[device_index], vconfig.LibvirtConfigGuestVideo) self.assertEqual(cfg.devices[device_index].type, 'vga') def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self): self.flags(enabled=True, group='vnc') self._test_get_guest_config_ppc64(6) def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self): self.flags(enabled=True, agent_enabled=True, group='spice') self._test_get_guest_config_ppc64(8) def _test_get_guest_config_bootmenu(self, image_meta, extra_specs): self.flags(virt_type='kvm', group='libvirt') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = extra_specs disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertTrue(conf.os_bootmenu) def test_get_guest_config_bootmenu_via_image_meta(self): image_meta = objects.ImageMeta.from_dict( {"disk_format": "raw", "properties": {"hw_boot_menu": "True"}}) self._test_get_guest_config_bootmenu(image_meta, {}) def test_get_guest_config_bootmenu_via_extra_specs(self): image_meta = objects.ImageMeta.from_dict( self.test_image_meta) self._test_get_guest_config_bootmenu(image_meta, {'hw:boot_menu': 'True'}) def test_get_guest_cpu_config_none(self): self.flags(cpu_mode="none", group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = 
blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertIsNone(conf.cpu.mode) self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_default_kvm(self): self.flags(virt_type="kvm", cpu_mode=None, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_default_uml(self): self.flags(virt_type="uml", cpu_mode=None, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(conf.cpu) def test_get_guest_cpu_config_default_lxc(self): self.flags(virt_type="lxc", cpu_mode=None, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(conf.cpu) def test_get_guest_cpu_config_host_passthrough(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="host-passthrough", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-passthrough") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_host_model(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="host-model", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) 
self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_custom(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="custom", cpu_model="Penryn", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "custom") self.assertEqual(conf.cpu.model, "Penryn") self.assertEqual(conf.cpu.sockets, 1) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_topology(self): instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.vcpus = 8 instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertEqual(conf.cpu.sockets, 4) self.assertEqual(conf.cpu.cores, 2) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_memory_balloon_config_by_default(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_disable(self): self.flags(mem_stats_period_seconds=0, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) no_exist = True for device in cfg.devices: if device.root_name == 'memballoon': no_exist = False break self.assertTrue(no_exist) def test_get_guest_memory_balloon_config_period_value(self): self.flags(mem_stats_period_seconds=21, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(21, device.period) def test_get_guest_memory_balloon_config_qemu(self): self.flags(virt_type='qemu', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = 
objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_xen(self): self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('xen', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) no_exist = True for device in cfg.devices: if device.root_name == 'memballoon': no_exist = False break self.assertTrue(no_exist) def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self): instance_data = dict(self.test_instance) instance_data.update({'vm_mode': vm_mode.HVM}) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, expect_xen_hvm=True) def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self): instance_data = dict(self.test_instance) instance_data.update({'vm_mode': vm_mode.XEN}) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, expect_xen_hvm=False, xen_only=True) def test_xml_and_uri_no_ramdisk(self): instance_data = dict(self.test_instance) instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=False) def test_xml_and_uri_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) def test_xml_and_uri(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True) def test_xml_and_uri_rescue(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True, rescue=instance_data) def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, rescue=instance_data) def test_xml_and_uri_rescue_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'aki-deadbeef' 
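        # Passing rescue=instance_data makes _check_xml_and_uri expect the
        # '.rescue' suffix on the kernel/ramdisk/disk file names referenced
        # by the generated XML.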
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=True, rescue=instance_data)

    def test_xml_and_uri_rescue_no_ramdisk(self):
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=False, rescue=instance_data)

    def test_xml_uuid(self):
        self._check_xml_and_uuid(self.test_image_meta)

    def test_lxc_container_and_uri(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)

    def test_xml_disk_prefix(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, None)

    def test_xml_user_specified_disk_prefix(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, 'sd')

    def test_xml_disk_driver(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_driver(instance_data)

    def test_xml_disk_bus_virtio(self):
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        self._check_xml_and_disk_bus(image_meta, None,
                                     (("disk", "virtio", "vda"),))

    def test_xml_disk_bus_ide(self):
        # It's necessary to check whether the architecture is power, because
        # power has no IDE support, so libvirt translates all IDE requests
        # to scsi
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda")}

        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta, None,
                                     (expec_val,))

    def test_xml_disk_bus_ide_and_virtio(self):
        # It's necessary to check whether the architecture is power, because
        # power has no IDE support, so libvirt translates all IDE requests
        # to scsi
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda")}

        swap = {'device_name': '/dev/vdc',
                'swap_size': 1}
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'size': 1}]
        block_device_info = {
                'swap': swap,
                'ephemerals': ephemerals}
        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     block_device_info,
                                     (expec_val,
                                      ("disk", "virtio", "vdb"),
                                      ("disk", "virtio", "vdc")))

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instances(self, mock_list):
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        names = drvr.list_instances()
        self.assertEqual(names[0], vm1.name())
        self.assertEqual(names[1], vm2.name())
        self.assertEqual(names[2], vm3.name())
        self.assertEqual(names[3], vm4.name())
        mock_list.assert_called_with(only_guests=True, only_running=False)

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instance_uuids(self, mock_list):
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        uuids = drvr.list_instance_uuids()
        self.assertEqual(len(uuids), 4)
        self.assertEqual(uuids[0], vm1.UUIDString())
self.assertEqual(uuids[1], vm2.UUIDString()) self.assertEqual(uuids[2], vm3.UUIDString()) self.assertEqual(uuids[3], vm4.UUIDString()) mock_list.assert_called_with(only_guests=True, only_running=False) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.flags(vcpu_pin_set="4-5") get_online_cpus.return_value = set([4, 5, 6]) expected_vcpus = 2 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_out_of_range(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.flags(vcpu_pin_set="4-6") get_online_cpus.return_value = set([4, 5]) self.assertRaises(exception.Invalid, drvr._get_vcpu_total) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_libvirt_error(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) self.flags(vcpu_pin_set="4-6") get_online_cpus.side_effect = not_supported_exc self.assertRaises(exception.Invalid, drvr._get_vcpu_total) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) self.flags(vcpu_pin_set="1") get_online_cpus.side_effect = not_supported_exc expected_vcpus = 1 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) @mock.patch('nova.virt.libvirt.host.Host.get_cpu_count') def test_get_host_vcpus_after_hotplug(self, get_cpu_count): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) get_cpu_count.return_value = 2 expected_vcpus = 2 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) get_cpu_count.return_value = 3 expected_vcpus = 3 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) @mock.patch.object(host.Host, "has_min_version", return_value=True) def test_quiesce(self, mock_has_min_version): self.create_fake_libvirt_mock(lookupByName=self.fake_lookup) with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze: drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_qemu_guest_agent": "yes"}}) self.assertIsNone(drvr.quiesce(self.context, instance, image_meta)) mock_fsfreeze.assert_called_once_with() def test_quiesce_not_supported(self): self.create_fake_libvirt_mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) self.assertRaises(exception.InstanceQuiesceNotSupported, drvr.quiesce, self.context, instance, None) @mock.patch.object(host.Host, "has_min_version", return_value=True) def test_unquiesce(self, mock_has_min_version): self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005, lookupByName=self.fake_lookup) with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw: drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = 
objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_qemu_guest_agent": "yes"}}) self.assertIsNone(drvr.unquiesce(self.context, instance, image_meta)) mock_fsthaw.assert_called_once_with() def test_create_snapshot_metadata(self): base = objects.ImageMeta.from_dict( {'disk_format': 'raw'}) instance_data = {'kernel_id': 'kernel', 'project_id': 'prj_id', 'ramdisk_id': 'ram_id', 'os_type': None} instance = objects.Instance(**instance_data) img_fmt = 'raw' snp_name = 'snapshot_name' drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name) expected = {'is_public': False, 'status': 'active', 'name': snp_name, 'properties': { 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', 'owner_id': instance['project_id'], 'ramdisk_id': instance['ramdisk_id'], }, 'disk_format': img_fmt, 'container_format': 'bare', } self.assertEqual(ret, expected) # simulate an instance with os_type field defined # disk format equals to ami # container format not equals to bare instance['os_type'] = 'linux' base = objects.ImageMeta.from_dict( {'disk_format': 'ami', 'container_format': 'test_container'}) expected['properties']['os_type'] = instance['os_type'] expected['disk_format'] = base.disk_format expected['container_format'] = base.container_format ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name) self.assertEqual(ret, expected) def test_get_volume_driver(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) connection_info = {'driver_volume_type': 'fake', 'data': {'device_path': '/fake', 'access_mode': 'rw'}} driver = conn._get_volume_driver(connection_info) result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver) self.assertTrue(result) def test_get_volume_driver_unknown(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) connection_info = {'driver_volume_type': 'unknown', 'data': {'device_path': '/fake', 'access_mode': 'rw'}} self.assertRaises( exception.VolumeDriverNotFound, conn._get_volume_driver, connection_info ) @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'connect_volume') @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config') def test_get_volume_config(self, get_config, connect_volume): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) connection_info = {'driver_volume_type': 'fake', 'data': {'device_path': '/fake', 'access_mode': 'rw'}} bdm = {'device_name': 'vdb', 'disk_bus': 'fake-bus', 'device_type': 'fake-type'} disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'], 'dev': 'vdb'} mock_config = mock.MagicMock() get_config.return_value = mock_config config = drvr._get_volume_config(connection_info, disk_info) get_config.assert_called_once_with(connection_info, disk_info) self.assertEqual(mock_config, config) def test_attach_invalid_volume_type(self): self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup instance = objects.Instance(**self.test_instance) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.VolumeDriverNotFound, drvr.attach_volume, None, {"driver_volume_type": "badtype"}, instance, "/dev/sda") def test_attach_blockio_invalid_hypervisor(self): self.flags(virt_type='fake_type', group='libvirt') self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup instance = 
objects.Instance(**self.test_instance) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.InvalidHypervisorType, drvr.attach_volume, None, {"driver_volume_type": "fake", "data": {"logical_block_size": "4096", "physical_block_size": "4096"} }, instance, "/dev/sda") def _test_check_discard(self, mock_log, driver_discard=None, bus=None, should_log=False): mock_config = mock.Mock() mock_config.driver_discard = driver_discard mock_config.target_bus = bus mock_instance = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._check_discard_for_attach_volume(mock_config, mock_instance) self.assertEqual(should_log, mock_log.called) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_no_unmap(self, mock_log): self._test_check_discard(mock_log, driver_discard=None, bus='scsi', should_log=False) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_blk_controller(self, mock_log): self._test_check_discard(mock_log, driver_discard='unmap', bus='virtio', should_log=True) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_valid_controller(self, mock_log): self._test_check_discard(mock_log, driver_discard='unmap', bus='scsi', should_log=False) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_blk_controller_no_unmap(self, mock_log): self._test_check_discard(mock_log, driver_discard=None, bus='virtio', should_log=False) @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') @mock.patch('nova.virt.libvirt.host.Host.get_domain') def test_attach_volume_with_vir_domain_affect_live_flag(self, mock_get_domain, mock_get_info, get_image): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) image_meta = {} get_image.return_value = image_meta mock_dom = mock.MagicMock() mock_get_domain.return_value = mock_dom connection_info = {"driver_volume_type": "fake", "data": {"device_path": "/fake", "access_mode": "rw"}} bdm = {'device_name': 'vdb', 'disk_bus': 'fake-bus', 'device_type': 'fake-type'} disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'], 'dev': 'vdb'} mock_get_info.return_value = disk_info mock_conf = mock.MagicMock() flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE) with test.nested( mock.patch.object(drvr, '_connect_volume'), mock.patch.object(drvr, '_get_volume_config', return_value=mock_conf), mock.patch.object(drvr, '_set_cache_mode'), mock.patch.object(drvr, '_check_discard_for_attach_volume') ) as (mock_connect_volume, mock_get_volume_config, mock_set_cache_mode, mock_check_discard): for state in (power_state.RUNNING, power_state.PAUSED): mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678] drvr.attach_volume(self.context, connection_info, instance, "/dev/vdb", disk_bus=bdm['disk_bus'], device_type=bdm['device_type']) mock_get_domain.assert_called_with(instance) mock_get_info.assert_called_with( instance, CONF.libvirt.virt_type, test.MatchType(objects.ImageMeta), bdm) mock_connect_volume.assert_called_with( connection_info, disk_info) mock_get_volume_config.assert_called_with( connection_info, disk_info) mock_set_cache_mode.assert_called_with(mock_conf) mock_dom.attachDeviceFlags.assert_called_with( mock_conf.to_xml(), flags=flags) mock_check_discard.assert_called_with(mock_conf, instance) 
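    # The attach test above and the detach test below both expect nova to
    # pass VIR_DOMAIN_AFFECT_CONFIG | VIR_DOMAIN_AFFECT_LIVE while the guest
    # is running or paused, so the device change applies to the persistent
    # definition as well as to the live domain.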
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml_with_disk = """ """
        mock_xml_without_disk = """ """
        mock_dom = mock.MagicMock()

        # Second time don't return anything about disk vdc so it looks removed
        return_list = [mock_xml_with_disk, mock_xml_without_disk]
        # Doubling the size of return list because we test with two guest power
        # states
        mock_dom.XMLDesc.side_effect = return_list + return_list

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with mock.patch.object(drvr, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                mock_get_domain.return_value = mock_dom
                drvr.detach_volume(connection_info, instance, '/dev/vdc')

                mock_get_domain.assert_called_with(instance)
                mock_dom.detachDeviceFlags.assert_called_with(
                    """ """, flags=flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_disk_not_found(self, mock_get_domain):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml_without_disk = """ """
        mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}

        mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
                                      5678]
        mock_get_domain.return_value = mock_dom
        self.assertRaises(exception.DiskNotFound, drvr.detach_volume,
                          connection_info, instance, '/dev/vdc')

        mock_get_domain.assert_called_once_with(instance)

    def test_multi_nic(self):
        network_info = _fake_network_info(self, 2)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        interfaces = tree.findall("./devices/interface")
        self.assertEqual(len(interfaces), 2)
        self.assertEqual(interfaces[0].get('type'), 'bridge')

    def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
                                   exc=ValueError()):
        open_behavior = os.open(os.path.join('.', '.directio.test'),
                                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
        if raise_open:
            open_behavior.AndRaise(exc)
        else:
            open_behavior.AndReturn(3)
            write_behavior = os.write(3, mox.IgnoreArg())
            if raise_write:
                write_behavior.AndRaise(exc)
            else:
                os.close(3)
                os.unlink(3)

    def test_supports_direct_io(self):
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the code-path
        # by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the attribute
            # directly.
os.O_DIRECT = 16384 self.addCleanup(delattr, os, 'O_DIRECT') einval = OSError() einval.errno = errno.EINVAL self.mox.StubOutWithMock(os, 'open') self.mox.StubOutWithMock(os, 'write') self.mox.StubOutWithMock(os, 'close') self.mox.StubOutWithMock(os, 'unlink') _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io self._behave_supports_direct_io() self._behave_supports_direct_io(raise_write=True) self._behave_supports_direct_io(raise_open=True) self._behave_supports_direct_io(raise_write=True, exc=einval) self._behave_supports_direct_io(raise_open=True, exc=einval) self.mox.ReplayAll() self.assertTrue(_supports_direct_io('.')) self.assertRaises(ValueError, _supports_direct_io, '.') self.assertRaises(ValueError, _supports_direct_io, '.') self.assertFalse(_supports_direct_io('.')) self.assertFalse(_supports_direct_io('.')) self.mox.VerifyAll() def _check_xml_and_container(self, instance): instance_ref = objects.Instance(**instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertEqual(drvr._uri(), 'lxc:///') network_info = _fake_network_info(self, 1) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drvr._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) check = [ (lambda t: t.find('.').get('type'), 'lxc'), (lambda t: t.find('./os/type').text, 'exe'), (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')] for i, (check, expected_result) in enumerate(check): self.assertEqual(check(tree), expected_result, '%s failed common check %d' % (xml, i)) target = tree.find('./devices/filesystem/source').get('dir') self.assertTrue(len(target) > 0) def _check_xml_and_disk_prefix(self, instance, prefix): instance_ref = objects.Instance(**instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) def _get_prefix(p, default): if p: return p + 'a' return default type_disk_map = { 'qemu': [ (lambda t: t.find('.').get('type'), 'qemu'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'vda'))], 'xen': [ (lambda t: t.find('.').get('type'), 'xen'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'xvda'))], 'kvm': [ (lambda t: t.find('.').get('type'), 'kvm'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'vda'))], 'uml': [ (lambda t: t.find('.').get('type'), 'uml'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'ubda'))] } for (virt_type, checks) in six.iteritems(type_disk_map): self.flags(virt_type=virt_type, group='libvirt') if prefix: self.flags(disk_prefix=prefix, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) network_info = _fake_network_info(self, 1) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drvr._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, '%s != %s failed check %d' % (check(tree), expected_result, i)) def _check_xml_and_disk_driver(self, image_meta): os_open = os.open directio_supported = True def os_open_stub(path, flags, *args, **kwargs): if flags & os.O_DIRECT: if not directio_supported: raise OSError(errno.EINVAL, '%s: %s' % (os.strerror(errno.EINVAL), path)) flags &= ~os.O_DIRECT 
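            # With O_DIRECT stripped (or never requested), fall through to the
            # real os.open so the test file is still created.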
return os_open(path, flags, *args, **kwargs) self.stub_out('os.open', os_open_stub) @staticmethod def connection_supports_direct_io_stub(dirpath): return directio_supported self.stubs.Set(libvirt_driver.LibvirtDriver, '_supports_direct_io', connection_supports_direct_io_stub) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) network_info = _fake_network_info(self, 1) drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drv._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) disks = tree.findall('./devices/disk/driver') for guest_disk in disks: self.assertEqual(guest_disk.get("cache"), "none") directio_supported = False # The O_DIRECT availability is cached on first use in # LibvirtDriver, hence we re-create it here drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drv._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) disks = tree.findall('./devices/disk/driver') for guest_disk in disks: self.assertEqual(guest_disk.get("cache"), "writethrough") def _check_xml_and_disk_bus(self, image_meta, block_device_info, wantConfig): instance_ref = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, block_device_info) xml = drv._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta, block_device_info=block_device_info) tree = etree.fromstring(xml) got_disks = tree.findall('./devices/disk') got_disk_targets = tree.findall('./devices/disk/target') for i in range(len(wantConfig)): want_device_type = wantConfig[i][0] want_device_bus = wantConfig[i][1] want_device_dev = wantConfig[i][2] got_device_type = got_disks[i].get('device') got_device_bus = got_disk_targets[i].get('bus') got_device_dev = got_disk_targets[i].get('dev') self.assertEqual(got_device_type, want_device_type) self.assertEqual(got_device_bus, want_device_bus) self.assertEqual(got_device_dev, want_device_dev) def _check_xml_and_uuid(self, image_meta): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) network_info = _fake_network_info(self, 1) drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drv._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) self.assertEqual(tree.find('./uuid').text, instance_ref['uuid']) @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_sysinfo_serial_hardware",) def _check_xml_and_uri(self, instance, mock_serial, expect_ramdisk=False, expect_kernel=False, rescue=None, expect_xen_hvm=False, xen_only=False): mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686" instance_ref = objects.Instance(**instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) xen_vm_mode = vm_mode.XEN if expect_xen_hvm: xen_vm_mode = vm_mode.HVM type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), (lambda t: t.find('./os/type').text, vm_mode.HVM), (lambda t: 
t.find('./devices/emulator'), None)]), 'kvm': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'kvm'), (lambda t: t.find('./os/type').text, vm_mode.HVM), (lambda t: t.find('./devices/emulator'), None)]), 'uml': ('uml:///system', [(lambda t: t.find('.').get('type'), 'uml'), (lambda t: t.find('./os/type').text, vm_mode.UML)]), 'xen': ('xen:///', [(lambda t: t.find('.').get('type'), 'xen'), (lambda t: t.find('./os/type').text, xen_vm_mode)])} if expect_xen_hvm or xen_only: hypervisors_to_check = ['xen'] else: hypervisors_to_check = ['qemu', 'kvm', 'xen'] for hypervisor_type in hypervisors_to_check: check_list = type_uri_map[hypervisor_type][1] if rescue: suffix = '.rescue' else: suffix = '' if expect_kernel: check = (lambda t: self.relpath(t.find('./os/kernel').text). split('/')[1], 'kernel' + suffix) else: check = (lambda t: t.find('./os/kernel'), None) check_list.append(check) if expect_kernel: check = (lambda t: "no_timer_check" in t.find('./os/cmdline'). text, hypervisor_type == "qemu") check_list.append(check) # Hypervisors that only support vm_mode.HVM and Xen # should not produce configuration that results in kernel # arguments if not expect_kernel and (hypervisor_type in ['qemu', 'kvm', 'xen']): check = (lambda t: t.find('./os/root'), None) check_list.append(check) check = (lambda t: t.find('./os/cmdline'), None) check_list.append(check) if expect_ramdisk: check = (lambda t: self.relpath(t.find('./os/initrd').text). split('/')[1], 'ramdisk' + suffix) else: check = (lambda t: t.find('./os/initrd'), None) check_list.append(check) if hypervisor_type in ['qemu', 'kvm']: xpath = "./sysinfo/system/entry" check = (lambda t: t.findall(xpath)[0].get("name"), "manufacturer") check_list.append(check) check = (lambda t: t.findall(xpath)[0].text, version.vendor_string()) check_list.append(check) check = (lambda t: t.findall(xpath)[1].get("name"), "product") check_list.append(check) check = (lambda t: t.findall(xpath)[1].text, version.product_string()) check_list.append(check) check = (lambda t: t.findall(xpath)[2].get("name"), "version") check_list.append(check) # NOTE(sirp): empty strings don't roundtrip in lxml (they are # converted to None), so we need an `or ''` to correct for that check = (lambda t: t.findall(xpath)[2].text or '', version.version_string_with_package()) check_list.append(check) check = (lambda t: t.findall(xpath)[3].get("name"), "serial") check_list.append(check) check = (lambda t: t.findall(xpath)[3].text, "cef19ce0-0ca2-11df-855d-b19fbce37686") check_list.append(check) check = (lambda t: t.findall(xpath)[4].get("name"), "uuid") check_list.append(check) check = (lambda t: t.findall(xpath)[4].text, instance['uuid']) check_list.append(check) if hypervisor_type in ['qemu', 'kvm']: check = (lambda t: t.findall('./devices/serial')[0].get( 'type'), 'file') check_list.append(check) check = (lambda t: t.findall('./devices/serial')[1].get( 'type'), 'pty') check_list.append(check) check = (lambda t: self.relpath(t.findall( './devices/serial/source')[0].get('path')). split('/')[1], 'console.log') check_list.append(check) else: check = (lambda t: t.find('./devices/console').get( 'type'), 'pty') check_list.append(check) common_checks = [ (lambda t: t.find('.').tag, 'domain'), (lambda t: t.find('./memory').text, '2097152')] if rescue: common_checks += [ (lambda t: self.relpath(t.findall('./devices/disk/source')[0]. get('file')).split('/')[1], 'disk.rescue'), (lambda t: self.relpath(t.findall('./devices/disk/source')[1]. 
get('file')).split('/')[1], 'disk')] else: common_checks += [(lambda t: self.relpath(t.findall( './devices/disk/source')[0].get('file')).split('/')[1], 'disk')] common_checks += [(lambda t: self.relpath(t.findall( './devices/disk/source')[1].get('file')).split('/')[1], 'disk.local')] for virt_type in hypervisors_to_check: expected_uri = type_uri_map[virt_type][0] checks = type_uri_map[virt_type][1] self.flags(virt_type=virt_type, group='libvirt') with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt: del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertEqual(drvr._uri(), expected_uri) network_info = _fake_network_info(self, 1) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, rescue=rescue) xml = drvr._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta, rescue=rescue) tree = etree.fromstring(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, '%s != %s failed check %d' % (check(tree), expected_result, i)) for i, (check, expected_result) in enumerate(common_checks): self.assertEqual(check(tree), expected_result, '%s != %s failed common check %d' % (check(tree), expected_result, i)) filterref = './devices/interface/filterref' vif = network_info[0] nic_id = vif['address'].replace(':', '') fw = firewall.NWFilterFirewall(drvr) instance_filter_name = fw._instance_filter_name(instance_ref, nic_id) self.assertEqual(tree.find(filterref).get('filter'), instance_filter_name) # This test is supposed to make sure we don't # override a specifically set uri # # Deliberately not just assigning this string to CONF.connection_uri # and checking against that later on. This way we make sure the # implementation doesn't fiddle around with the CONF. testuri = 'something completely different' self.flags(connection_uri=testuri, group='libvirt') for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map): self.flags(virt_type=virt_type, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertEqual(drvr._uri(), testuri) def test_ensure_filtering_rules_for_instance_timeout(self): # ensure_filtering_fules_for_instance() finishes with timeout. # Preparing mocks def fake_none(self, *args): return class FakeTime(object): def __init__(self): self.counter = 0 def sleep(self, t): self.counter += t fake_timer = FakeTime() def fake_sleep(t): fake_timer.sleep(t) # _fake_network_info must be called before create_fake_libvirt_mock(), # as _fake_network_info calls importutils.import_class() and # create_fake_libvirt_mock() mocks importutils.import_class(). 
network_info = _fake_network_info(self, 1) self.create_fake_libvirt_mock() instance_ref = objects.Instance(**self.test_instance) # Start test self.mox.ReplayAll() try: drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr.firewall_driver, 'setup_basic_filtering', fake_none) self.stubs.Set(drvr.firewall_driver, 'prepare_instance_filter', fake_none) self.stubs.Set(drvr.firewall_driver, 'instance_filter_exists', fake_none) self.stubs.Set(greenthread, 'sleep', fake_sleep) drvr.ensure_filtering_rules_for_instance(instance_ref, network_info) except exception.NovaException as e: msg = ('The firewall filter for %s does not exist' % instance_ref['name']) c1 = (0 <= six.text_type(e).find(msg)) self.assertTrue(c1) self.assertEqual(29, fake_timer.counter, "Didn't wait the expected " "amount of time") @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_shared_storage_test_file') @mock.patch.object(fakelibvirt.Connection, 'compareCPU') def test_check_can_live_migrate_dest_all_pass_with_block_migration( self, mock_cpu, mock_test_file): instance_ref = objects.Instance(**self.test_instance) instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'disk_available_least': 400, 'cpu_info': 'asdf', } filename = "file" # _check_cpu_match mock_cpu.return_value = 1 # mounted_on_same_shared_storage mock_test_file.return_value = filename # No need for the src_compute_info return_value = drvr.check_can_live_migrate_destination(self.context, instance_ref, None, compute_info, True) return_value.is_volume_backed = False self.assertThat({"filename": "file", 'image_type': 'default', 'disk_available_mb': 409600, "disk_over_commit": False, "block_migration": True, "is_volume_backed": False}, matchers.DictMatches(return_value.to_legacy_dict())) @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_shared_storage_test_file') @mock.patch.object(fakelibvirt.Connection, 'compareCPU') def test_check_can_live_migrate_dest_all_pass_no_block_migration( self, mock_cpu, mock_test_file): instance_ref = objects.Instance(**self.test_instance) instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'disk_available_least': 400, 'cpu_info': 'asdf', } filename = "file" # _check_cpu_match mock_cpu.return_value = 1 # mounted_on_same_shared_storage mock_test_file.return_value = filename # No need for the src_compute_info return_value = drvr.check_can_live_migrate_destination(self.context, instance_ref, None, compute_info, False) return_value.is_volume_backed = False self.assertThat({"filename": "file", "image_type": 'default', "block_migration": False, "disk_over_commit": False, "disk_available_mb": 409600, "is_volume_backed": False}, matchers.DictMatches(return_value.to_legacy_dict())) @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_shared_storage_test_file', return_value='fake') @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu') def test_check_can_live_migrate_guest_cpu_none_model( self, mock_cpu, mock_test_file): # Tests that when instance.vcpu_model.model is None, the host cpu # model is used for live migration. 
instance_ref = objects.Instance(**self.test_instance) instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel instance_ref.vcpu_model.model = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1} result = drvr.check_can_live_migrate_destination( self.context, instance_ref, compute_info, compute_info) result.is_volume_backed = False mock_cpu.assert_called_once_with(None, 'asdf') expected_result = {"filename": 'fake', "image_type": CONF.libvirt.images_type, "block_migration": False, "disk_over_commit": False, "disk_available_mb": 1024, "is_volume_backed": False} self.assertDictEqual(expected_result, result.to_legacy_dict()) @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_shared_storage_test_file') @mock.patch.object(fakelibvirt.Connection, 'compareCPU') def test_check_can_live_migrate_dest_no_instance_cpu_info( self, mock_cpu, mock_test_file): instance_ref = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'cpu_info': jsonutils.dumps({ "vendor": "AMD", "arch": arch.I686, "features": ["sse3"], "model": "Opteron_G3", "topology": {"cores": 2, "threads": 1, "sockets": 4} }), 'disk_available_least': 1} filename = "file" # _check_cpu_match mock_cpu.return_value = 1 # mounted_on_same_shared_storage mock_test_file.return_value = filename return_value = drvr.check_can_live_migrate_destination(self.context, instance_ref, compute_info, compute_info, False) # NOTE(danms): Compute manager would have set this, so set it here return_value.is_volume_backed = False self.assertThat({"filename": "file", "image_type": 'default', "block_migration": False, "disk_over_commit": False, "disk_available_mb": 1024, "is_volume_backed": False}, matchers.DictMatches(return_value.to_legacy_dict())) @mock.patch.object(fakelibvirt.Connection, 'compareCPU') def test_check_can_live_migrate_dest_incompatible_cpu_raises( self, mock_cpu): instance_ref = objects.Instance(**self.test_instance) instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1} mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo') self.assertRaises(exception.InvalidCPUInfo, drvr.check_can_live_migrate_destination, self.context, instance_ref, compute_info, compute_info, False) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare): mock_compare.return_value = 5 conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info)) self.assertIsNone(ret) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_handles_not_supported_error_gracefully(self, mock_vconfig, mock_compare): not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virCompareCPU', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) mock_compare.side_effect = not_supported_exc conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info)) self.assertIsNone(ret) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt.LibvirtDriver, '_vcpu_model_to_cpu_config') def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu, 
mock_compare): mock_compare.return_value = 6 conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None) self.assertIsNone(ret) def test_compare_cpu_virt_type_xen(self): self.flags(virt_type='xen', group='libvirt') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(None, None) self.assertIsNone(ret) def test_compare_cpu_virt_type_qemu(self): self.flags(virt_type='qemu', group='libvirt') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(None, None) self.assertIsNone(ret) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig, mock_compare): mock_compare.return_value = 0 conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.InvalidCPUInfo, conn._compare_cpu, None, jsonutils.dumps(_fake_cpu_info)) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig, mock_compare): mock_compare.side_effect = fakelibvirt.libvirtError('cpu') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationPreCheckError, conn._compare_cpu, None, jsonutils.dumps(_fake_cpu_info)) def test_check_can_live_migrate_dest_cleanup_works_correctly(self): objects.Instance(**self.test_instance) dest_check_data = objects.LibvirtLiveMigrateData( filename="file", block_migration=True, disk_over_commit=False, disk_available_mb=1024) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file') drvr._cleanup_shared_storage_test_file("file") self.mox.ReplayAll() drvr.check_can_live_migrate_destination_cleanup(self.context, dest_check_data) @mock.patch('os.path.exists', return_value=True) @mock.patch('os.utime') def test_check_shared_storage_test_file_exists(self, mock_utime, mock_path_exists): tmpfile_path = os.path.join(CONF.instances_path, 'tmp123') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr._check_shared_storage_test_file('tmp123')) mock_utime.assert_called_once_with(CONF.instances_path, None) mock_path_exists.assert_called_once_with(tmpfile_path) @mock.patch('os.path.exists', return_value=False) @mock.patch('os.utime') def test_check_shared_storage_test_file_does_not_exist(self, mock_utime, mock_path_exists): tmpfile_path = os.path.join(CONF.instances_path, 'tmp123') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._check_shared_storage_test_file('tmp123')) mock_utime.assert_called_once_with(CONF.instances_path, None) mock_path_exists.assert_called_once_with(tmpfile_path) def _mock_can_live_migrate_source(self, block_migration=False, is_shared_block_storage=False, is_shared_instance_path=False, is_booted_from_volume=False, disk_available_mb=1024, block_device_info=None, block_device_text=None): instance = objects.Instance(**self.test_instance) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', block_migration=block_migration, disk_over_commit=False, disk_available_mb=disk_available_mb) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_is_shared_block_storage') drvr._is_shared_block_storage(instance, dest_check_data, block_device_info).AndReturn(is_shared_block_storage) 
        self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
        drvr._check_shared_storage_test_file('file').AndReturn(
            is_shared_instance_path)
        self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
        drvr.get_instance_disk_info(instance,
                                    block_device_info=block_device_info).\
            AndReturn(block_device_text)
        self.mox.StubOutWithMock(drvr, '_is_booted_from_volume')
        drvr._is_booted_from_volume(instance, block_device_text).AndReturn(
            is_booted_from_volume)

        return (instance, dest_check_data, drvr)

    def test_check_can_live_migrate_source_block_migration(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            block_migration=True)

        self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
        drvr._assert_dest_node_has_enough_disk(
            self.context, instance, dest_check_data.disk_available_mb,
            False, None)

        self.mox.ReplayAll()
        ret = drvr.check_can_live_migrate_source(self.context, instance,
                                                 dest_check_data)
        self.assertIsInstance(ret, objects.LibvirtLiveMigrateData)
        self.assertIn('is_shared_block_storage', ret)
        self.assertIn('is_shared_instance_path', ret)

    def test_check_can_live_migrate_source_shared_block_storage(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            is_shared_block_storage=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_shared_instance_path(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            is_shared_instance_path=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_non_shared_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source, self.context,
                          instance, dest_check_data)

    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            block_migration=True,
            is_shared_block_storage=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_shared_path_block_migration_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            block_migration=True,
            is_shared_instance_path=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data, None)

    def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            block_migration=True,
            disk_available_mb=0)

        drvr.get_instance_disk_info(instance,
                                    block_device_info=None).AndReturn(
                                        '[{"virt_disk_size":2}]')

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_booted_from_volume(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            is_booted_from_volume=True,
            block_device_text='[]')
        self.mox.ReplayAll()
drvr.check_can_live_migrate_source(self.context, instance, dest_check_data) def test_check_can_live_migrate_source_booted_from_volume_with_swap(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( is_booted_from_volume=True, block_device_text='[{"path":"disk.swap"}]') self.mox.ReplayAll() self.assertRaises(exception.InvalidSharedStorage, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data) @mock.patch.object(host.Host, 'has_min_version', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_check_shared_storage_test_file', return_value=False) def test_check_can_live_migrate_source_block_migration_with_bdm_error( self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_min_version): bdi = {'block_device_mapping': ['bdm']} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', block_migration=True, disk_over_commit=False, disk_available_mb=100) self.assertRaises(exception.MigrationPreCheckError, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data, block_device_info=bdi) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_check_shared_storage_test_file', return_value=False) def test_check_can_live_migrate_source_bm_with_bdm_tunnelled_error( self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_min_version): self.flags(live_migration_tunnelled=True, group='libvirt') bdi = {'block_device_mapping': ['bdm']} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', block_migration=True, disk_over_commit=False, disk_available_mb=100) drvr._parse_migration_flags() self.assertRaises(exception.MigrationPreCheckError, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data, block_device_info=bdi) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file')
    def _test_check_can_live_migrate_source_block_migration_none(
            self, block_migrate, is_shared_instance_path, is_share_block,
            mock_check, mock_shared_block, mock_get_bdi,
            mock_booted_from_volume, mock_has_local, mock_enough,
            mock_version):
        mock_check.return_value = is_shared_instance_path
        mock_shared_block.return_value = is_share_block
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            disk_over_commit=False,
            disk_available_mb=100)
        dest_check_data_ret = drvr.check_can_live_migrate_source(
            self.context, instance, dest_check_data)
        self.assertEqual(block_migrate, dest_check_data_ret.block_migration)

    def test_check_can_live_migrate_source_block_migration_none_shared1(self):
        self._test_check_can_live_migrate_source_block_migration_none(
            False, True, False)

    def test_check_can_live_migrate_source_block_migration_none_shared2(self):
        self._test_check_can_live_migrate_source_block_migration_none(
            False, False, True)

    def test_check_can_live_migrate_source_block_migration_none_no_share(self):
        self._test_check_can_live_migrate_source_block_migration_none(
            True, False, False)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file') def test_check_can_live_migration_source_disk_over_commit_none(self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_disk_check): mock_check.return_value = False mock_shared_block.return_value = False instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', disk_available_mb=100) drvr.check_can_live_migrate_source( self.context, instance, dest_check_data) self.assertFalse(mock_disk_check.called) def _is_shared_block_storage_test_create_mocks(self, disks): # Test data instance_xml = ("instance-0000000a" "{}") disks_xml = '' for dsk in disks: if dsk['type'] is not 'network': disks_xml = ''.join([disks_xml, "" "" "" "" "".format(**dsk)]) else: disks_xml = ''.join([disks_xml, "" "" "" "" "" "" "".format(**dsk)]) # Preparing mocks mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.XMLDesc = mock.Mock() mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml)) mock_lookup = mock.Mock() def mock_lookup_side_effect(name): return mock_virDomain mock_lookup.side_effect = mock_lookup_side_effect mock_getsize = mock.Mock() mock_getsize.return_value = "10737418240" return (mock_getsize, mock_lookup) def test_is_shared_block_storage_rbd(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData(image_type='rbd') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_lvm(self): self.flags(images_type='lvm', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='lvm', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_qcow2(self): self.flags(images_type='qcow2', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='qcow2', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_source(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = 
objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(is_shared_instance_path=False, is_volume_backed=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_dest(self): bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='rbd', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_volume_backed(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with mock.patch.object(host.Host, 'get_domain', mock_lookup): self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info = bdi)) mock_lookup.assert_called_once_with(instance) def test_is_shared_block_storage_volume_backed_with_disk(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}, {'type': 'file', 'driver': 'raw', 'source': 'file', 'source_path': '/instance/disk.local', 'target_dev': 'vdb'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with test.nested( mock.patch.object(os.path, 'getsize', mock_getsize), mock.patch.object(host.Host, 'get_domain', mock_lookup)): self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info = bdi)) mock_getsize.assert_called_once_with('/instance/disk.local') mock_lookup.assert_called_once_with(instance) def test_is_shared_block_storage_nfs(self): bdi = {'block_device_mapping': []} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_backend = mock.MagicMock() mock_image_backend.backend.return_value = mock_backend mock_backend.is_file_in_instance_path.return_value = True mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData( is_shared_instance_path=True, image_type='foo') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): self.assertTrue(drvr._is_shared_block_storage( 
'instance', data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_live_migration_update_graphics_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) xml_tmpl = ("" "" "" "" "" "" "" "" "" "") initial_xml = xml_tmpl.format(vnc='1.2.3.4', spice='5.6.7.8') target_xml = xml_tmpl.format(vnc='10.0.0.1', spice='10.0.0.2') target_xml = etree.tostring(etree.fromstring(target_xml)) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn( initial_xml) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None, target_xml, mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}} migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='10.0.0.1', graphics_listen_addr_spice='10.0.0.2', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) def test_live_migration_update_volume_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') # start test connection_info = { u'driver_volume_type': u'iscsi', u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdm = objects.LibvirtLiveMigrateBDMInfo( serial='58a84f6d-3f0c-4e19-a0af-eb657b790657', bus='virtio', type='disk', dev='vdb', connection_info=connection_info) migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, bdms=[bdm], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_mock = mock.MagicMock() with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \ mget_info,\ mock.patch.object(drvr._host, 'get_domain') as mget_domain,\ mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\ mock.patch.object(drvr, '_update_xml') as mupdate: mget_info.side_effect = exception.InstanceNotFound( instance_id='foo') mget_domain.return_value = test_mock test_mock.XMLDesc.return_value = target_xml self.assertFalse(drvr._live_migration_operation( self.context, instance_ref, 'dest', False, migrate_data, test_mock, [])) mupdate.assert_called_once_with(target_xml, migrate_data.bdms, {}, '') def test_live_migration_with_valid_target_connect_addr(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'cde.67890.opst-lun-Z') # start test connection_info = { u'driver_volume_type': u'iscsi', u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdm = objects.LibvirtLiveMigrateBDMInfo( serial='58a84f6d-3f0c-4e19-a0af-eb657b790657', bus='virtio', type='disk', dev='vdb', connection_info=connection_info) migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr='127.0.0.2', bdms=[bdm], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_mock = mock.MagicMock() with mock.patch.object(drvr, '_update_xml') as mupdate: test_mock.XMLDesc.return_value = target_xml drvr._live_migration_operation(self.context, instance_ref, 'dest', False, migrate_data, test_mock, []) test_mock.migrateToURI2.assert_called_once_with( 'qemu+tcp://127.0.0.2/system', None, mupdate(), None, None, 0) def test_update_volume_xml(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, bus='virtio', type='disk', dev='vdb') bdmi.connection_info = {u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': {u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}} conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = bdmi.type conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = bdmi.dev conf.target_bus = bdmi.bus conf.serial = bdmi.connection_info.get('serial') conf.source_type = "block" conf.source_path = bdmi.connection_info['data'].get('device_path') with mock.patch.object(drvr, '_get_volume_config', return_value=conf): parser = etree.XMLParser(remove_blank_text=True) xml_doc = etree.fromstring(initial_xml, parser) config = drvr._update_volume_xml(xml_doc, [bdmi]) xml_doc = etree.fromstring(target_xml, parser) self.assertEqual(etree.tostring(xml_doc), etree.tostring(config)) def test_live_migration_uri(self): hypervisor_uri_map = ( ('xen', 'xenmigr://%s/system'), ('kvm', 'qemu+tcp://%s/system'), ('qemu', 'qemu+tcp://%s/system'), # anything else will return None ('lxc', None), ('parallels', None), ('', None), ) dest = 'destination' for hyperv, uri in hypervisor_uri_map: self.flags(virt_type=hyperv, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) if uri is not None: uri = uri % dest self.assertEqual(uri, drvr._live_migration_uri(dest)) else: self.assertRaises(exception.LiveMigrationURINotAvailable, drvr._live_migration_uri, dest) def test_live_migration_uri_forced(self): dest = 'destination' for hyperv in ('kvm', 'xen'): self.flags(virt_type=hyperv, group='libvirt') forced_uri = 'foo://%s/bar' self.flags(live_migration_uri=forced_uri, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(forced_uri % dest, drvr._live_migration_uri(dest)) def test_update_volume_xml_no_serial(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) xml_tmpl = """
""" initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" connection_info = { u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, bus='virtio', dev='vdb', type='disk') bdmi.connection_info = connection_info conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = bdmi.type conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = bdmi.dev conf.target_bus = bdmi.bus conf.serial = bdmi.connection_info.get('serial') conf.source_type = "block" conf.source_path = bdmi.connection_info['data'].get('device_path') with mock.patch.object(drvr, '_get_volume_config', return_value=conf): xml_doc = etree.fromstring(initial_xml) config = drvr._update_volume_xml(xml_doc, [bdmi]) self.assertEqual(target_xml, etree.tostring(config)) def test_update_volume_xml_no_connection_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, dev='vdb', type='disk', bus='scsi', format='qcow') bdmi.connection_info = {} conf = vconfig.LibvirtConfigGuestDisk() with mock.patch.object(drvr, '_get_volume_config', return_value=conf): xml_doc = etree.fromstring(initial_xml) config = drvr._update_volume_xml(xml_doc, [bdmi]) self.assertEqual(target_xml, etree.tostring(config)) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI2") @mock.patch.object(fakelibvirt.virDomain, "XMLDesc") def test_live_migration_update_serial_console_xml(self, mock_xml, mock_migrate): self.compute = importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance xml_tmpl = ("" "" "" "" "" "" "") initial_xml = xml_tmpl.format(addr='9.0.0.1') target_xml = xml_tmpl.format(addr='9.0.0.12') target_xml = etree.tostring(etree.fromstring(target_xml)) # Preparing mocks mock_xml.return_value = initial_xml mock_migrate.side_effect = fakelibvirt.libvirtError("ERR") # start test bandwidth = CONF.libvirt.live_migration_bandwidth migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='10.0.0.1', graphics_listen_addr_spice='10.0.0.2', serial_listen_addr='9.0.0.12', target_connect_addr=None, bdms=[], block_migration=False) dom = fakelibvirt.virDomain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, dom, []) mock_xml.assert_called_once_with( flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE) mock_migrate.assert_called_once_with( drvr._live_migration_uri('dest'), None, target_xml, mock.ANY, None, bandwidth) 
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_fails_with_serial_console_without_migratable(self): self.compute = importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance CONF.set_override("enabled", True, "serial_console") dom = fakelibvirt.virDomain migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, dom, []) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_uses_migrateToURI_without_migratable_flag(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3") @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._update_xml', return_value='') @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='') def test_live_migration_uses_migrateToURI3( self, mock_old_xml, mock_new_xml, mock_migrateToURI3, mock_min_version): self.flags(live_migration_tunnelled=False, group='libvirt') # Preparing mocks disk_paths = ['vda', 'vdb'] params = { 'migrate_disks': ['vda', 'vdb'], 'bandwidth': CONF.libvirt.live_migration_bandwidth, 'destination_xml': '', } mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR") # Start test migrate_data = 
objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) dom = fakelibvirt.virDomain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._parse_migration_flags() instance = objects.Instance(**self.test_instance) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance, 'dest', False, migrate_data, dom, disk_paths) mock_migrateToURI3.assert_called_once_with( drvr._live_migration_uri('dest'), params, 19) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_fails_without_migratable_flag_or_0_addr(self): self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc') self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='1.2.3.4', graphics_listen_addr_spice='1.2.3.4', serial_listen_addr='127.0.0.1', target_connect_addr=None, block_migration=False) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3") @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._update_xml', return_value='') @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='') def test_block_live_migration_tunnelled_migrateToURI3( self, mock_old_xml, mock_new_xml, mock_migrateToURI3, mock_min_version): self.flags(live_migration_tunnelled=True, group='libvirt') # Preparing mocks disk_paths = [] params = { 'bandwidth': CONF.libvirt.live_migration_bandwidth, 'destination_xml': '', } # Start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=True) dom = fakelibvirt.virDomain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._parse_migration_flags() instance = objects.Instance(**self.test_instance) drvr._live_migration_operation(self.context, instance, 'dest', True, migrate_data, dom, disk_paths) mock_migrateToURI3.assert_called_once_with( drvr._live_migration_uri('dest'), params, 151) def test_live_migration_raises_exception(self): # Confirms recover method is called when exceptions are raised. 
# Preparing data self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None: vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) else: vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE ).AndReturn(FakeVirtDomain().XMLDesc(flags=0)) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None, mox.IgnoreArg(), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='127.0.0.1', graphics_listen_addr_spice='127.0.0.1', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state) self.assertEqual(power_state.RUNNING, instance_ref.power_state) def test_live_migration_raises_unsupported_config_exception(self): # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED, # migrateToURI is used instead. # Preparing data instance_ref = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, 'migrateToURI2') self.mox.StubOutWithMock(vdmock, 'migrateToURI') _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn( FakeVirtDomain().XMLDesc(flags=0)) unsupported_config_error = fakelibvirt.libvirtError('ERR') unsupported_config_error.err = ( fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,) # This is the first error we hit but since the error code is # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI. vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None, mox.IgnoreArg(), mox.IgnoreArg(), None, _bandwidth).AndRaise(unsupported_config_error) # This is the second and final error that will actually kill the run, # we use TestingException to make sure it's not the same libvirtError # above. 
vdmock.migrateToURI(drvr._live_migration_uri('dest'), mox.IgnoreArg(), None, _bandwidth).AndRaise(test.TestingException('oops')) graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'} migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='127.0.0.1', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock( drvr, '_check_graphics_addresses_can_live_migrate') drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs) self.mox.ReplayAll() # start test self.assertRaises(test.TestingException, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) @mock.patch('shutil.rmtree') @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy') def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy, mock_get_instance_path, mock_exist, mock_shutil ): # destroy method may raise InstanceTerminationFailure or # InstancePowerOffFailure, here use their base class Invalid. mock_destroy.side_effect = exception.Invalid(reason='just test') fake_instance_path = os.path.join(cfg.CONF.instances_path, '/fake_instance_uuid') mock_get_instance_path.return_value = fake_instance_path drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) migrate_data = objects.LibvirtLiveMigrateData( is_shared_instance_path=False, instance_relative_path=False) self.assertRaises(exception.Invalid, drvr.rollback_live_migration_at_destination, "context", "instance", [], None, True, migrate_data) mock_exist.assert_called_once_with(fake_instance_path) mock_shutil.assert_called_once_with(fake_instance_path) @mock.patch('shutil.rmtree') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy') def test_rollback_live_migration_at_dest_shared(self, mock_destroy, mock_get_instance_path, mock_exist, mock_shutil ): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) migrate_data = objects.LibvirtLiveMigrateData( is_shared_instance_path=True, instance_relative_path=False) drvr.rollback_live_migration_at_destination("context", "instance", [], None, True, migrate_data) mock_destroy.assert_called_once_with("context", "instance", [], None, True, migrate_data) self.assertFalse(mock_get_instance_path.called) self.assertFalse(mock_exist.called) self.assertFalse(mock_shutil.called) @mock.patch.object(host.Host, "has_min_version", return_value=False) @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths(self, mock_xml, mock_version): self.flags(live_migration_tunnelled=False, group='libvirt') xml = """ dummy d4e13113-918e-42fe-9fc9-861693ffd432 """ mock_xml.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._parse_migration_flags() dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) paths = drvr._live_migration_copy_disk_paths(None, None, guest) self.assertEqual((["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"], ['vda', 'vdd']), paths) @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths_tunnelled(self, mock_xml): self.flags(live_migration_tunnelled=True, group='libvirt') xml = """ dummy 
d4e13113-918e-42fe-9fc9-861693ffd432 """ mock_xml.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._parse_migration_flags() dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) paths = drvr._live_migration_copy_disk_paths(None, None, guest) self.assertEqual((["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"], ['vda', 'vdd']), paths) @mock.patch.object(host.Host, "has_min_version", return_value=True) @mock.patch('nova.virt.driver.get_block_device_info') @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths_selective_block_migration( self, mock_xml, mock_get_instance, mock_block_device_info, mock_version): self.flags(live_migration_tunnelled=False, group='libvirt') xml = """ dummy d4e13113-918e-42fe-9fc9-861693ffd432 """ mock_xml.return_value = xml instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' block_device_info = { 'swap': { 'disk_bus': u'virtio', 'swap_size': 10, 'device_name': u'/dev/vdc' }, 'root_device_name': u'/dev/vda', 'ephemerals': [{ 'guest_format': u'ext3', 'device_name': u'/dev/vdb', 'disk_bus': u'virtio', 'device_type': u'disk', 'size': 1 }], 'block_device_mapping': [{ 'guest_format': None, 'boot_index': None, 'mount_device': u'/dev/vdd', 'connection_info': { u'driver_volume_type': u'iscsi', 'serial': u'147df29f-aec2-4851-b3fe-f68dad151834', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'encrypted': False, u'qos_specs': None, u'target_iqn': u'iqn.2010-10.org.openstack:' u'volume-147df29f-aec2-4851-b3fe-' u'f68dad151834', u'target_portal': u'10.102.44.141:3260', u'volume_id': u'147df29f-aec2-4851-b3fe-f68dad151834', u'target_lun': 1, u'auth_password': u'cXELT66FngwzTwpf', u'auth_username': u'QbQQjj445uWgeQkFKcVw', u'auth_method': u'CHAP' } }, 'disk_bus': None, 'device_type': None, 'delete_on_termination': False }] } mock_block_device_info.return_value = block_device_info drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._parse_migration_flags() dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) return_value = drvr._live_migration_copy_disk_paths(context, instance, guest) expected = (['/var/lib/nova/instance/123/disk.root', '/var/lib/nova/instance/123/disk.shared', '/var/lib/nova/instance/123/disk.config'], ['vda', 'vdb', 'vdc']) self.assertEqual(expected, return_value) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths") def test_live_migration_data_gb_plain(self, mock_paths): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) data_gb = drvr._live_migration_data_gb(instance, []) self.assertEqual(2, data_gb) self.assertEqual(0, mock_paths.call_count) def test_live_migration_data_gb_block(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) def fake_stat(path): class StatResult(object): def __init__(self, size): self._size = size @property def st_size(self): return self._size if path == "/var/lib/nova/instance/123/disk.root": return StatResult(10 * units.Gi) elif path == "/dev/mapper/somevol": return StatResult(1.5 * units.Gi) else: raise Exception("Should not be reached") disk_paths = ["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"] with mock.patch.object(os, "stat") as mock_stat: 
mock_stat.side_effect = fake_stat data_gb = drvr._live_migration_data_gb(instance, disk_paths) # Expecting 2 GB for RAM, plus 10 GB for disk.root # and 1.5 GB rounded to 2 GB for somevol, so 14 GB self.assertEqual(14, data_gb) EXPECT_SUCCESS = 1 EXPECT_FAILURE = 2 EXPECT_ABORT = 3 @mock.patch.object(time, "time") @mock.patch.object(time, "sleep", side_effect=lambda x: eventlet.sleep(0)) @mock.patch.object(host.DomainJobInfo, "for_domain") @mock.patch.object(objects.Instance, "save") @mock.patch.object(objects.Migration, "save") @mock.patch.object(fakelibvirt.Connection, "_mark_running") @mock.patch.object(fakelibvirt.virDomain, "abortJob") def _test_live_migration_monitoring(self, job_info_records, time_records, expect_result, mock_abort, mock_running, mock_save, mock_mig_save, mock_job_info, mock_sleep, mock_time, expected_mig_status=None): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) dom = fakelibvirt.Domain(drvr._get_connection(), "", True) guest = libvirt_guest.Guest(dom) finish_event = eventlet.event.Event() def fake_job_info(hostself): while True: self.assertTrue(len(job_info_records) > 0) rec = job_info_records.pop(0) if type(rec) == str: if rec == "thread-finish": finish_event.send() elif rec == "domain-stop": dom.destroy() else: if len(time_records) > 0: time_records.pop(0) return rec return rec def fake_time(): if len(time_records) > 0: return time_records[0] else: return int( datetime.datetime(2001, 1, 20, 20, 1, 0) .strftime('%s')) mock_job_info.side_effect = fake_job_info mock_time.side_effect = fake_time dest = mock.sentinel.migrate_dest migration = objects.Migration(context=self.context, id=1) migrate_data = objects.LibvirtLiveMigrateData( migration=migration) fake_post_method = mock.MagicMock() fake_recover_method = mock.MagicMock() drvr._live_migration_monitor(self.context, instance, guest, dest, fake_post_method, fake_recover_method, False, migrate_data, dom, finish_event, []) mock_mig_save.assert_called_with() if expect_result == self.EXPECT_SUCCESS: self.assertFalse(fake_recover_method.called, 'Recover method called when success expected') self.assertFalse(mock_abort.called, 'abortJob not called when success expected') fake_post_method.assert_called_once_with( self.context, instance, dest, False, migrate_data) else: if expect_result == self.EXPECT_ABORT: self.assertTrue(mock_abort.called, 'abortJob called when abort expected') else: self.assertFalse(mock_abort.called, 'abortJob not called when failure expected') self.assertFalse(fake_post_method.called, 'Post method called when success not expected') if expected_mig_status: fake_recover_method.assert_called_once_with( self.context, instance, dest, False, migrate_data, migration_status=expected_mig_status) else: fake_recover_method.assert_called_once_with( self.context, instance, dest, False, migrate_data) def test_live_migration_monitor_success(self): # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS) def test_live_migration_monitor_success_race(self): # A normalish sequence but we're too slow to see 
the # completed job state domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS) def test_live_migration_monitor_failed(self): # A failed sequence where we see all the expected events domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_FAILED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE) def test_live_migration_monitor_failed_race(self): # A failed sequence where we are too slow to see the # failed event domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE) def test_live_migration_monitor_cancelled(self): # A cancelled sequence where we see all the events domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE, expected_mig_status='cancelled') @mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime") @mock.patch.object(libvirt_driver.LibvirtDriver, "_migration_downtime_steps") def test_live_migration_monitor_downtime(self, mock_downtime_steps, mock_set_downtime): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=1000000, group='libvirt') # We've setup 4 fake downtime steps - first value is the # time delay, second is the downtime value downtime_steps = [ (90, 10), (180, 50), (270, 200), (500, 300), ] mock_downtime_steps.return_value = downtime_steps # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. # Times are chosen so that only the first 3 downtime # steps are needed. 
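        # Illustrative sketch, not part of the original test module: one way a
        # monitoring loop could consume (delay, downtime) step tuples like the
        # fake downtime_steps defined above. The helper name and behaviour are
        # assumptions made only to show the shape of the logic, not the
        # driver's real code.
        def _pick_active_downtime(steps, elapsed):
            """Return the downtime of the last step whose delay has elapsed."""
            active = None
            for delay, downtime in steps:
                if elapsed >= delay:
                    active = downtime
            return active

        # For example, with steps [(90, 10), (180, 50), (270, 200), (500, 300)]
        # an elapsed time of 200s selects 50, and 600s selects 300, which is
        # why only the first three steps are exercised by the chosen times.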
fake_times = [0, 1, 30, 95, 150, 200, 300] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_SUCCESS) mock_set_downtime.assert_has_calls([mock.call(10), mock.call(50), mock.call(200)]) def test_live_migration_monitor_completion(self): self.flags(live_migration_completion_timeout=100, live_migration_progress_timeout=1000000, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_monitor_progress(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_monitor_progress_zero_data_remaining(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
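        # Illustrative sketch (an assumption about the behaviour these
        # progress-timeout tests exercise, not the driver's actual code): the
        # progress watchdog only resets when data_remaining changes, and the
        # job is aborted once no change has been seen for longer than the
        # configured progress timeout.
        def _progress_watchdog(samples, progress_timeout):
            """Yield True for each (time, data_remaining) sample past timeout."""
            last_value = None
            last_change = None
            for now, remaining in samples:
                if remaining != last_value:
                    last_value = remaining
                    last_change = now
                yield (now - last_change) > progress_timeout

        # With a 150s timeout and data_remaining stuck at 90 while time
        # advances in 40s steps, the abort condition trips by the 160s sample;
        # steadily shrinking values keep resetting the watchdog instead.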
        fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]

        # A normal sequence where we see all the normal job states
        domain_info_records = [
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
                data_remaining=0),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
                data_remaining=90),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
                data_remaining=70),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
                data_remaining=50),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
                data_remaining=30),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
                data_remaining=10),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
                data_remaining=0),
            "thread-finish",
            "domain-stop",
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
        ]

        self._test_live_migration_monitoring(domain_info_records, fake_times,
                                             self.EXPECT_FAILURE)

    def test_live_migration_downtime_steps(self):
        self.flags(live_migration_downtime=400, group='libvirt')
        self.flags(live_migration_downtime_steps=10, group='libvirt')
        self.flags(live_migration_downtime_delay=30, group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        steps = drvr._migration_downtime_steps(3.0)

        self.assertEqual([
            (0, 37),
            (90, 38),
            (180, 39),
            (270, 42),
            (360, 46),
            (450, 55),
            (540, 70),
            (630, 98),
            (720, 148),
            (810, 238),
            (900, 400),
        ], list(steps))

    @mock.patch.object(utils, "spawn")
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_live_migration_copy_disk_paths")
    def test_live_migration_main(self, mock_copy_disk_path, mock_running,
                                 mock_guest, mock_monitor, mock_thread):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        dom = fakelibvirt.Domain(drvr._get_connection(), "demo", True)
        guest = libvirt_guest.Guest(dom)
        migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
        disks_to_copy = (['/some/path/one', '/test/path/two'],
                         ['vda', 'vdb'])
        mock_copy_disk_path.return_value = disks_to_copy

        mock_guest.return_value = guest

        def fake_post():
            pass

        def fake_recover():
            pass

        drvr._live_migration(self.context, instance, "fakehost", fake_post,
                             fake_recover, True, migrate_data)
        mock_copy_disk_path.assert_called_once_with(self.context, instance,
                                                    guest)

        class AnyEventletEvent(object):
            def __eq__(self, other):
                return type(other) == eventlet.event.Event

        mock_thread.assert_called_once_with(
            drvr._live_migration_operation,
            self.context, instance, "fakehost", True,
            migrate_data, dom, disks_to_copy[1])
        mock_monitor.assert_called_once_with(
            self.context, instance, guest, "fakehost",
            fake_post, fake_recover, True,
            migrate_data, dom, AnyEventletEvent(), disks_to_copy[0])

    def _do_test_create_images_and_backing(self, disk_type):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

        disk_info = {'path': 'foo', 'type': disk_type,
                     'disk_size': 1 * 1024 ** 3,
                     'virt_disk_size': 20 * 1024 ** 3,
                     'backing_file': None}

        libvirt_driver.libvirt_utils.create_image(
            disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
        drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()
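        # Illustrative sketch, not the driver's code: one exponential ramp that
        # is consistent with the (delay, downtime) pairs asserted in
        # test_live_migration_downtime_steps above. Every name here is made up
        # for illustration; only the expected tuples come from that test.
        def _downtime_steps_sketch(data_gb, downtime=400, steps=10, delay=30):
            """Yield (delay, downtime) tuples ramping up to the max downtime."""
            delay = int(delay * data_gb)           # per-step delay scales with data size
            offset = downtime / float(steps + 1)   # starting downtime floor
            base = (downtime - offset) ** (1 / float(steps))
            for i in range(steps + 1):
                yield (int(delay * i), int(offset + base ** i))

        # list(_downtime_steps_sketch(3.0)) begins (0, 37), (90, 38), (180, 39)
        # and climbs to the configured maximum, (900, 400), at the final step.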
self.stub_out('os.path.exists', lambda *args: False) drvr._create_images_and_backing(self.context, self.test_instance, "/fake/instance/dir", [disk_info]) def test_create_images_and_backing_qcow2(self): self._do_test_create_images_and_backing('qcow2') def test_create_images_and_backing_raw(self): self._do_test_create_images_and_backing('raw') def test_create_images_and_backing_images_not_exist_no_fallback(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image', side_effect=exception.ImageNotFound( image_id="fake_id")): self.assertRaises(exception.ImageNotFound, conn._create_images_and_backing, self.context, instance, "/fake/instance/dir", disk_info) def test_create_images_and_backing_images_not_exist_fallback(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) with test.nested( mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'), mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image', side_effect=exception.ImageNotFound( image_id="fake_id")), ) as (copy_image_mock, fetch_image_mock): conn._create_images_and_backing(self.context, instance, "/fake/instance/dir", disk_info, fallback_from_host="fake_host") backfile_path = os.path.join(base_dir, 'fake_image_backing_file') kernel_path = os.path.join(CONF.instances_path, self.test_instance['uuid'], 'kernel') ramdisk_path = os.path.join(CONF.instances_path, self.test_instance['uuid'], 'ramdisk') copy_image_mock.assert_has_calls([ mock.call(dest=backfile_path, src=backfile_path, host='fake_host', receive=True), mock.call(dest=kernel_path, src=kernel_path, host='fake_host', receive=True), mock.call(dest=ramdisk_path, src=ramdisk_path, host='fake_host', receive=True) ]) fetch_image_mock.assert_has_calls([ mock.call(context=self.context, target=backfile_path, image_id=self.test_instance['image_ref'], user_id=self.test_instance['user_id'], project_id=self.test_instance['project_id'], max_size=25165824), mock.call(self.context, kernel_path, self.test_instance['kernel_id'], self.test_instance['user_id'], self.test_instance['project_id']), mock.call(self.context, ramdisk_path, self.test_instance['ramdisk_id'], self.test_instance['user_id'], self.test_instance['project_id']), ]) @mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image') @mock.patch.object(os.path, 'exists', return_value=True) def test_create_images_and_backing_images_exist(self, mock_exists, mock_fetch_image): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'kernel_id': 'fake_kernel_id', 
'ramdisk_id': 'fake_ramdisk_id', 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) with mock.patch.object(imagebackend.Image, 'get_disk_size'): conn._create_images_and_backing(self.context, instance, '/fake/instance/dir', disk_info) self.assertFalse(mock_fetch_image.called) def test_create_images_and_backing_ephemeral_gets_created(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}, {u'backing_file': u'ephemeral_1_default', u'disk_size': 393216, u'over_committed_disk_size': 1073348608, u'path': u'disk_eph_path', u'type': u'qcow2', u'virt_disk_size': 1073741824}] base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) instance = objects.Instance(**self.test_instance) with test.nested( mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'), mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'), mock.patch.object(drvr, '_create_ephemeral'), mock.patch.object(imagebackend.Image, 'verify_base_size'), mock.patch.object(imagebackend.Image, 'get_disk_size') ) as (fetch_kernel_ramdisk_mock, fetch_image_mock, create_ephemeral_mock, verify_base_size_mock, disk_size_mock): drvr._create_images_and_backing(self.context, instance, "/fake/instance/dir", disk_info) self.assertEqual(len(create_ephemeral_mock.call_args_list), 1) m_args, m_kwargs = create_ephemeral_mock.call_args_list[0] self.assertEqual( os.path.join(base_dir, 'ephemeral_1_default'), m_kwargs['target']) self.assertEqual(len(fetch_image_mock.call_args_list), 1) m_args, m_kwargs = fetch_image_mock.call_args_list[0] self.assertEqual( os.path.join(base_dir, 'fake_image_backing_file'), m_kwargs['target']) verify_base_size_mock.assert_has_calls([ mock.call(os.path.join(base_dir, 'fake_image_backing_file'), 25165824), mock.call(os.path.join(base_dir, 'ephemeral_1_default'), 1073741824) ]) def test_create_images_and_backing_disk_info_none(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk') drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance, fallback_from_host=None) self.mox.ReplayAll() drvr._create_images_and_backing(self.context, self.test_instance, "/fake/instance/dir", None) def _generate_target_ret(self, target_connect_addr=None): target_ret = { 'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': target_connect_addr, 'serial_listen_addr': '127.0.0.1', 'volume': { '12345': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}, 'serial': '12345'}, 'disk_info': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}}, '67890': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}, 'serial': '67890'}, 'disk_info': {'bus': 'scsi', 'dev': 'sdb', 'type': 'disk'}}}} return target_ret def test_pre_live_migration_works_correctly_mocked(self): self._test_pre_live_migration_works_correctly_mocked() def test_pre_live_migration_with_transport_ip(self): self.flags(live_migration_inbound_addr='127.0.0.2', group='libvirt') target_ret = self._generate_target_ret('127.0.0.2') self._test_pre_live_migration_works_correctly_mocked(target_ret) def _test_pre_live_migration_works_correctly_mocked(self, target_ret=None): # Creating testdata vol = {'block_device_mapping': [ {'connection_info': 
{'serial': '12345', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}}, 'mount_device': '/dev/sda'}, {'connection_info': {'serial': '67890', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}, 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) class FakeNetworkInfo(object): def fixed_ips(self): return ["test_ip_addr"] def fake_none(*args, **kwargs): return self.stubs.Set(drvr, '_create_images_and_backing', fake_none) instance = objects.Instance(**self.test_instance) c = context.get_admin_context() nw_info = FakeNetworkInfo() # Creating mocks self.mox.StubOutWithMock(driver, "block_device_info_get_mapping") driver.block_device_info_get_mapping(vol ).AndReturn(vol['block_device_mapping']) self.mox.StubOutWithMock(drvr, "_connect_volume") for v in vol['block_device_mapping']: disk_info = { 'bus': "scsi", 'dev': v['mount_device'].rpartition("/")[2], 'type': "disk" } drvr._connect_volume(v['connection_info'], disk_info) self.mox.StubOutWithMock(drvr, 'plug_vifs') drvr.plug_vifs(mox.IsA(instance), nw_info) self.mox.ReplayAll() migrate_data = { "block_migration": False, "instance_relative_path": "foo", "is_shared_block_storage": False, "is_shared_instance_path": False, } result = drvr.pre_live_migration( c, instance, vol, nw_info, None, migrate_data=migrate_data) if not target_ret: target_ret = self._generate_target_ret() self.assertEqual( result.to_legacy_dict( pre_migration_result=True)['pre_live_migration_result'], target_ret) @mock.patch.object(os, 'mkdir') @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.remotefs.' 'RemoteFilesystem.copy_file') @mock.patch('nova.virt.driver.block_device_info_get_mapping') @mock.patch('nova.virt.configdrive.required_by', return_value=True) def test_pre_live_migration_block_with_config_drive_success( self, mock_required_by, block_device_info_get_mapping, mock_copy_file, mock_get_instance_path, mock_mkdir): self.flags(config_drive_format='iso9660') vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} fake_instance_path = os.path.join(cfg.CONF.instances_path, '/fake_instance_uuid') mock_get_instance_path.return_value = fake_instance_path drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) migrate_data = objects.LibvirtLiveMigrateData() migrate_data.is_shared_instance_path = False migrate_data.is_shared_block_storage = False migrate_data.block_migration = True migrate_data.instance_relative_path = 'foo' src = "%s:%s/disk.config" % (instance.host, fake_instance_path) result = drvr.pre_live_migration( self.context, instance, vol, [], None, migrate_data) block_device_info_get_mapping.assert_called_once_with( {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'} ]} ) mock_copy_file.assert_called_once_with(src, fake_instance_path) migrate_data.graphics_listen_addrs_vnc = '127.0.0.1' migrate_data.graphics_listen_addrs_spice = '127.0.0.1' migrate_data.serial_listen_addr = '127.0.0.1' self.assertEqual(migrate_data, result) @mock.patch('nova.virt.driver.block_device_info_get_mapping', return_value=()) @mock.patch('nova.virt.configdrive.required_by', return_value=True) def test_pre_live_migration_block_with_config_drive_mocked_with_vfat( self, 
mock_required_by, block_device_info_get_mapping): self.flags(config_drive_format='vfat') # Creating testdata vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) res_data = drvr.pre_live_migration( self.context, instance, vol, [], None, {'is_shared_instance_path': False, 'is_shared_block_storage': False, 'block_migration': False, 'instance_relative_path': 'foo'}) res_data = res_data.to_legacy_dict(pre_migration_result=True) block_device_info_get_mapping.assert_called_once_with( {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'} ]} ) self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': None, 'serial_listen_addr': '127.0.0.1', 'volume': {}}, res_data['pre_live_migration_result']) def test_pre_live_migration_vol_backed_works_correctly_mocked(self): # Creating testdata, using temp dir. with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) vol = {'block_device_mapping': [ {'connection_info': {'serial': '12345', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}}, 'mount_device': '/dev/sda'}, {'connection_info': {'serial': '67890', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}, 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) def fake_none(*args, **kwargs): return self.stubs.Set(drvr, '_create_images_and_backing', fake_none) class FakeNetworkInfo(object): def fixed_ips(self): return ["test_ip_addr"] inst_ref = objects.Instance(**self.test_instance) c = context.get_admin_context() nw_info = FakeNetworkInfo() # Creating mocks self.mox.StubOutWithMock(drvr, "_connect_volume") for v in vol['block_device_mapping']: disk_info = { 'bus': "scsi", 'dev': v['mount_device'].rpartition("/")[2], 'type': "disk" } drvr._connect_volume(v['connection_info'], disk_info) self.mox.StubOutWithMock(drvr, 'plug_vifs') drvr.plug_vifs(mox.IsA(inst_ref), nw_info) self.mox.ReplayAll() migrate_data = {'is_shared_instance_path': False, 'is_shared_block_storage': False, 'is_volume_backed': True, 'block_migration': False, 'instance_relative_path': inst_ref['name'], 'disk_over_commit': False, 'disk_available_mb': 123, 'image_type': 'qcow2', 'filename': 'foo', } ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None, migrate_data) target_ret = { 'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': None, 'serial_listen_addr': '127.0.0.1', 'volume': { '12345': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}, 'serial': '12345'}, 'disk_info': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}}, '67890': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}, 'serial': '67890'}, 'disk_info': {'bus': 'scsi', 'dev': 'sdb', 'type': 'disk'}}}} self.assertEqual( ret.to_legacy_dict(True)['pre_live_migration_result'], target_ret) self.assertTrue(os.path.exists('%s/%s/' % (tmpdir, inst_ref['name']))) def test_pre_live_migration_plug_vifs_retry_fails(self): self.flags(live_migration_retry_count=3) instance = objects.Instance(**self.test_instance) def fake_plug_vifs(instance, network_info): raise 
processutils.ProcessExecutionError() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs) self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: eventlet.sleep(0)) disk_info_json = jsonutils.dumps({}) self.assertRaises(processutils.ProcessExecutionError, drvr.pre_live_migration, self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json) def test_pre_live_migration_plug_vifs_retry_works(self): self.flags(live_migration_retry_count=3) called = {'count': 0} instance = objects.Instance(**self.test_instance) def fake_plug_vifs(instance, network_info): called['count'] += 1 if called['count'] < CONF.live_migration_retry_count: raise processutils.ProcessExecutionError() else: return drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs) self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: eventlet.sleep(0)) disk_info_json = jsonutils.dumps({}) drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json) def test_pre_live_migration_image_not_created_with_shared_storage(self): migrate_data_set = [{'is_shared_block_storage': False, 'is_shared_instance_path': True, 'is_volume_backed': False, 'filename': 'foo', 'instance_relative_path': 'bar', 'disk_over_commit': False, 'disk_available_mb': 123, 'image_type': 'qcow2', 'block_migration': False}, {'is_shared_block_storage': True, 'is_shared_instance_path': True, 'is_volume_backed': False, 'filename': 'foo', 'instance_relative_path': 'bar', 'disk_over_commit': False, 'disk_available_mb': 123, 'image_type': 'qcow2', 'block_migration': False}, {'is_shared_block_storage': False, 'is_shared_instance_path': True, 'is_volume_backed': False, 'filename': 'foo', 'instance_relative_path': 'bar', 'disk_over_commit': False, 'disk_available_mb': 123, 'image_type': 'qcow2', 'block_migration': True}] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) # creating mocks with test.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'), mock.patch.object(drvr, 'plug_vifs'), ) as ( create_image_mock, rules_mock, plug_mock, ): disk_info_json = jsonutils.dumps({}) for migrate_data in migrate_data_set: res = drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json, migrate_data=migrate_data) self.assertFalse(create_image_mock.called) self.assertIsInstance(res, objects.LibvirtLiveMigrateData) def test_pre_live_migration_with_not_shared_instance_path(self): migrate_data = {'is_shared_block_storage': False, 'is_shared_instance_path': False, 'block_migration': False, 'instance_relative_path': 'foo'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) def check_instance_dir(context, instance, instance_dir, disk_info, fallback_from_host=False): self.assertTrue(instance_dir) # creating mocks with test.nested( mock.patch.object(drvr, '_create_images_and_backing', side_effect=check_instance_dir), mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'), mock.patch.object(drvr, 'plug_vifs'), ) as ( create_image_mock, rules_mock, plug_mock, ): disk_info_json = jsonutils.dumps({}) res = drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json, migrate_data=migrate_data) 
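        # Illustrative sketch, not part of the original tests: the behaviour
        # exercised by the plug_vifs retry tests above amounts to a bounded
        # retry loop; the helper below is an assumption written only to make
        # that shape explicit.
        def _retry(func, max_attempts, exceptions):
            """Call func until it succeeds or max_attempts is exhausted."""
            for attempt in range(1, max_attempts + 1):
                try:
                    return func()
                except exceptions:
                    if attempt == max_attempts:
                        raise

        # With live_migration_retry_count=3, a callable that fails twice and
        # then succeeds returns normally; one that always fails re-raises on
        # the third attempt, as the two retry tests above expect.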
create_image_mock.assert_has_calls( [mock.call(self.context, instance, mock.ANY, {}, fallback_from_host=instance.host)]) self.assertIsInstance(res, objects.LibvirtLiveMigrateData) def test_pre_live_migration_recreate_disk_info(self): migrate_data = {'is_shared_block_storage': False, 'is_shared_instance_path': False, 'block_migration': True, 'instance_relative_path': '/some/path/'} disk_info = [{'disk_size': 5368709120, 'type': 'raw', 'virt_disk_size': 5368709120, 'path': '/some/path/disk', 'backing_file': '', 'over_committed_disk_size': 0}, {'disk_size': 1073741824, 'type': 'raw', 'virt_disk_size': 1073741824, 'path': '/some/path/disk.eph0', 'backing_file': '', 'over_committed_disk_size': 0}] image_disk_info = {'/some/path/disk': 'raw', '/some/path/disk.eph0': 'raw'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) instance_path = os.path.dirname(disk_info[0]['path']) disk_info_path = os.path.join(instance_path, 'disk.info') with test.nested( mock.patch.object(os, 'mkdir'), mock.patch.object(fake_libvirt_utils, 'write_to_file'), mock.patch.object(drvr, '_create_images_and_backing') ) as ( mkdir, write_to_file, create_images_and_backing ): drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=jsonutils.dumps(disk_info), migrate_data=migrate_data) write_to_file.assert_called_with(disk_info_path, jsonutils.dumps(image_disk_info)) def test_get_instance_disk_info_works_correctly(self): # Test data instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file' self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) os.path.getsize('/test/disk.local').AndReturn((3328599655)) ret = ("image: /test/disk\n" "file format: raw\n" "virtual size: 20G (21474836480 bytes)\n" "disk size: 3.1G\n" "cluster_size: 2097152\n" "backing file: /test/dummy (actual path: /backing/file)\n") self.mox.StubOutWithMock(os.path, "exists") os.path.exists('/test/disk.local').AndReturn(True) self.mox.StubOutWithMock(utils, "execute") utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS, ).AndReturn((ret, '')) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance) info = jsonutils.loads(info) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) self.assertEqual(info[1]['type'], 'qcow2') self.assertEqual(info[1]['path'], '/test/disk.local') self.assertEqual(info[1]['virt_disk_size'], 21474836480) self.assertEqual(info[1]['backing_file'], "file") self.assertEqual(info[1]['over_committed_disk_size'], 18146236825) def test_post_live_migration(self): vol = {'block_device_mapping': [ {'connection_info': { 'data': {'multipath_id': 
'dummy1'}, 'serial': 'fake_serial1'}, 'mount_device': '/dev/sda', }, {'connection_info': { 'data': {}, 'serial': 'fake_serial2'}, 'mount_device': '/dev/sdb', }]} def fake_initialize_connection(context, volume_id, connector): return {'data': {}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_connector = {'host': 'fake'} inst_ref = {'id': 'foo'} cntx = context.get_admin_context() # Set up the mock expectations with test.nested( mock.patch.object(driver, 'block_device_info_get_mapping', return_value=vol['block_device_mapping']), mock.patch.object(drvr, "get_volume_connector", return_value=fake_connector), mock.patch.object(drvr._volume_api, "initialize_connection", side_effect=fake_initialize_connection), mock.patch.object(drvr, '_disconnect_volume') ) as (block_device_info_get_mapping, get_volume_connector, initialize_connection, _disconnect_volume): drvr.post_live_migration(cntx, inst_ref, vol) block_device_info_get_mapping.assert_has_calls([ mock.call(vol)]) get_volume_connector.assert_has_calls([ mock.call(inst_ref)]) _disconnect_volume.assert_has_calls([ mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'), mock.call({'data': {}}, 'sdb')]) def test_get_instance_disk_info_excludes_volumes(self): # Test data instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "" "" "" "" "" "" "") # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file' self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) os.path.getsize('/test/disk.local').AndReturn((3328599655)) ret = ("image: /test/disk\n" "file format: raw\n" "virtual size: 20G (21474836480 bytes)\n" "disk size: 3.1G\n" "cluster_size: 2097152\n" "backing file: /test/dummy (actual path: /backing/file)\n") self.mox.StubOutWithMock(os.path, "exists") os.path.exists('/test/disk.local').AndReturn(True) self.mox.StubOutWithMock(utils, "execute") utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS, ).AndReturn((ret, '')) self.mox.ReplayAll() conn_info = {'driver_volume_type': 'fake'} info = {'block_device_mapping': [ {'connection_info': conn_info, 'mount_device': '/dev/vdc'}, {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance, block_device_info=info) info = jsonutils.loads(info) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) self.assertEqual(info[1]['type'], 'qcow2') self.assertEqual(info[1]['path'], '/test/disk.local') self.assertEqual(info[1]['virt_disk_size'], 21474836480) self.assertEqual(info[1]['backing_file'], "file") self.assertEqual(info[1]['over_committed_disk_size'], 18146236825) def test_get_instance_disk_info_no_bdinfo_passed(self): # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method # without access to Nova's block 
device information. We want to make # sure that we guess volumes mostly correctly in that case as well instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance) info = jsonutils.loads(info) self.assertEqual(1, len(info)) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) def test_spawn_with_network_info(self): # Preparing mocks def fake_none(*args, **kwargs): return def fake_getLibVersion(): return fakelibvirt.FAKE_LIBVIRT_VERSION def fake_getCapabilities(): return """ cef19ce0-0ca2-11df-855d-b19fbce37686 x86_64 Penryn Intel """ def fake_baselineCPU(cpu, flag): return """ Penryn Intel """ # _fake_network_info must be called before create_fake_libvirt_mock(), # as _fake_network_info calls importutils.import_class() and # create_fake_libvirt_mock() mocks importutils.import_class(). network_info = _fake_network_info(self, 1) self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion, getCapabilities=fake_getCapabilities, getVersion=lambda: 1005001, baselineCPU=fake_baselineCPU) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 # we send an int to test sha1 call instance = objects.Instance(**instance_ref) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # Mock out the get_info method of the LibvirtDriver so that the polling # in the spawn method of the LibvirtDriver returns immediately self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info') libvirt_driver.LibvirtDriver.get_info(instance ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING)) # Start test self.mox.ReplayAll() with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt: del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr.firewall_driver, 'setup_basic_filtering', fake_none) self.stubs.Set(drvr.firewall_driver, 'prepare_instance_filter', fake_none) self.stubs.Set(imagebackend.Image, 'cache', fake_none) drvr.spawn(self.context, instance, image_meta, [], 'herp', network_info=network_info) path = os.path.join(CONF.instances_path, instance['name']) if os.path.isdir(path): shutil.rmtree(path) path = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if os.path.isdir(path): shutil.rmtree(os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name)) def test_spawn_without_image_meta(self): self.create_image_called = False def fake_none(*args, **kwargs): return def fake_create_image(*args, **kwargs): self.create_image_called = True def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance = objects.Instance(**instance_ref) drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_image', fake_create_image) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) drvr.spawn(self.context, instance, image_meta, [], None) self.assertTrue(self.create_image_called) drvr.spawn(self.context, instance, image_meta, [], None) self.assertTrue(self.create_image_called) def test_spawn_from_volume_calls_cache(self): self.cache_called_for_disk = False def fake_none(*args, **kwargs): return def fake_cache(*args, **kwargs): if kwargs.get('image_id') == 'my_fake_image': self.cache_called_for_disk = True def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(imagebackend.Image, 'cache', fake_cache) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) block_device_info = {'root_device_name': '/dev/vda', 'block_device_mapping': [ {'mount_device': 'vda', 'boot_index': 0} ] } image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # Volume-backed instance created without image instance_ref = self.test_instance instance_ref['image_ref'] = '' instance_ref['root_device_name'] = '/dev/vda' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None, block_device_info=block_device_info) self.assertFalse(self.cache_called_for_disk) # Booted from volume but with placeholder image instance_ref = self.test_instance instance_ref['image_ref'] = 'my_fake_image' instance_ref['root_device_name'] = '/dev/vda' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None, block_device_info=block_device_info) self.assertFalse(self.cache_called_for_disk) # Booted from an image instance_ref['image_ref'] = 'my_fake_image' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None) self.assertTrue(self.cache_called_for_disk) def test_start_lxc_from_volume(self): self.flags(virt_type="lxc", group='libvirt') def check_setup_container(image, container_dir=None): self.assertIsInstance(image, imgmodel.LocalBlockImage) self.assertEqual(image.path, '/dev/path/to/dev') return '/dev/nbd1' bdm = { 'guest_format': None, 'boot_index': 0, 'mount_device': '/dev/sda', 'connection_info': { 'driver_volume_type': 'iscsi', 'serial': 'afc1', 'data': { 'access_mode': 'rw', 'target_discovered': False, 'encrypted': False, 'qos_specs': None, 'target_iqn': 'iqn: volume-afc1', 'target_portal': 'ip: 3260', 'volume_id': 'afc1', 'target_lun': 1, 'auth_password': 'uj', 'auth_username': '47', 'auth_method': 'CHAP' } }, 'disk_bus': 'scsi', 'device_type': 'disk', 'delete_on_termination': False } def _connect_volume_side_effect(connection_info, disk_info): bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev' def _get(key, opt=None): return bdm.get(key, opt) def getitem(key): return bdm[key] def setitem(key, val): bdm[key] = val bdm_mock = mock.MagicMock() bdm_mock.__getitem__.side_effect = getitem bdm_mock.__setitem__.side_effect = setitem bdm_mock.get = _get disk_mock = mock.MagicMock() 
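        # Illustrative sketch (assumption, not driver code): the three spawn
        # cases in test_spawn_from_volume_calls_cache above reduce to a simple
        # predicate - the image cache is only involved when the instance
        # actually boots from an image rather than a volume.
        def _needs_image_cache(image_ref, booted_from_volume):
            """Return True when the root disk must be fetched into the cache."""
            return bool(image_ref) and not booted_from_volume

        # An empty image_ref, or a placeholder image on a volume-backed
        # instance, skips the cache; a real image on a non-volume boot uses it.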
disk_mock.source_path = '/dev/path/to/dev' block_device_info = {'block_device_mapping': [bdm_mock], 'root_device_name': '/dev/sda'} # Volume-backed instance created without image instance_ref = self.test_instance instance_ref['image_ref'] = '' instance_ref['root_device_name'] = '/dev/sda' instance_ref['ephemeral_gb'] = 0 instance_ref['uuid'] = uuidutils.generate_uuid() inst_obj = objects.Instance(**instance_ref) image_meta = objects.ImageMeta.from_dict({}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, '_connect_volume', side_effect=_connect_volume_side_effect), mock.patch.object(drvr, '_get_volume_config', return_value=disk_mock), mock.patch.object(drvr, 'get_info', return_value=hardware.InstanceInfo( state=power_state.RUNNING)), mock.patch('nova.virt.disk.api.setup_container', side_effect=check_setup_container), mock.patch('nova.virt.disk.api.teardown_container'), mock.patch.object(objects.Instance, 'save')): drvr.spawn(self.context, inst_obj, image_meta, [], None, network_info=[], block_device_info=block_device_info) self.assertEqual('/dev/nbd1', inst_obj.system_metadata.get( 'rootfs_device_name')) def test_spawn_with_pci_devices(self): def fake_none(*args, **kwargs): return None def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) class FakeLibvirtPciDevice(object): def dettach(self): return None def reset(self): return None def fake_node_device_lookup_by_name(address): pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}" % dict(hex='[\da-f]', oct='[0-8]')) pattern = re.compile(pattern) if pattern.match(address) is None: raise fakelibvirt.libvirtError() return FakeLibvirtPciDevice() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_image', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) drvr._conn.nodeDeviceLookupByName = \ fake_node_device_lookup_by_name instance_ref = self.test_instance instance_ref['image_ref'] = 'my_fake_image' instance = objects.Instance(**instance_ref) instance['pci_devices'] = objects.PciDeviceList( objects=[objects.PciDevice(address='0000:00:00.0')]) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) drvr.spawn(self.context, instance, image_meta, [], None) def test_chown_disk_config_for_instance(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path') self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(fake_libvirt_utils, 'chown') fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid') os.path.exists('/tmp/uuid/disk.config').AndReturn(True) fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid()) self.mox.ReplayAll() drvr._chown_disk_config_for_instance(instance) def _test_create_image_plain(self, os_type='', filename='', mkfs=False): gotFiles = [] def fake_image(self, instance, name, image_type=''): class FakeImage(imagebackend.Image): def __init__(self, instance, name, is_block_dev=False): 
self.path = os.path.join(instance['name'], name) self.is_block_dev = is_block_dev def create_image(self, prepare_template, base, size, *args, **kwargs): pass def resize_image(self, size): pass def cache(self, fetch_func, filename, size=None, *args, **kwargs): gotFiles.append({'filename': filename, 'size': size}) def snapshot(self, name): pass return FakeImage(instance, name) def fake_none(*args, **kwargs): return def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) # Stop 'libvirt_driver._create_image' touching filesystem self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image", fake_image) instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance = objects.Instance(**instance_ref) instance['os_type'] = os_type drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) if mkfs: self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND', {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'}) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) drvr._create_image(context, instance, disk_info['mapping']) drvr._get_guest_xml(self.context, instance, None, disk_info, image_meta) wantFiles = [ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab', 'size': 10 * units.Gi}, {'filename': filename, 'size': 20 * units.Gi}, ] self.assertEqual(gotFiles, wantFiles) def test_create_image_plain_os_type_blank(self): self._test_create_image_plain(os_type='', filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_none(self): self._test_create_image_plain(os_type=None, filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_set_no_fs(self): self._test_create_image_plain(os_type='test', filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_set_with_fs(self): ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str( 'mkfs.ext4 --label %(fs_label)s %(target)s')[:7]) self._test_create_image_plain(os_type='test', filename=ephemeral_file_name, mkfs=True) @mock.patch('nova.virt.libvirt.driver.imagecache') def test_create_image_initrd(self, mock_imagecache): INITRD = self._EPHEMERAL_20_DEFAULT + '.initrd' KERNEL = 'vmlinuz.' 
+ self._EPHEMERAL_20_DEFAULT mock_imagecache.get_cache_fname.side_effect = \ [KERNEL, INITRD, self._EPHEMERAL_20_DEFAULT + '.img'] filename = self._EPHEMERAL_20_DEFAULT gotFiles = [] outer = self def fake_image(self, instance, name, image_type=''): class FakeImage(imagebackend.Image): def __init__(self, instance, name, is_block_dev=False): self.path = os.path.join(instance['name'], name) self.is_block_dev = is_block_dev def create_image(self, prepare_template, base, size, *args, **kwargs): pass def cache(self, fetch_func, filename, size=None, *args, **kwargs): gotFiles.append({'filename': filename, 'size': size}) if filename == INITRD: outer.assertEqual(fetch_func, fake_libvirt_utils.fetch_raw_image) if filename == KERNEL: outer.assertEqual(fetch_func, fake_libvirt_utils.fetch_raw_image) def resize_image(self, size): pass def snapshot(self, name): pass return FakeImage(instance, name) instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance_ref['kernel_id'] = 2 instance_ref['ramdisk_id'] = 3 instance_ref['os_type'] = 'test' instance = objects.Instance(**instance_ref) driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(nova.virt.libvirt.imagebackend.Backend, "image", fake_image), mock.patch.object(driver, '_get_guest_xml'), mock.patch.object(driver, '_create_domain_and_network'), mock.patch.object(driver, 'get_info', return_value=[hardware.InstanceInfo(state=power_state.RUNNING)]) ): image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) driver._create_image(context, instance, disk_info['mapping']) wantFiles = [ {'filename': KERNEL, 'size': None}, {'filename': INITRD, 'size': None}, {'filename': self._EPHEMERAL_20_DEFAULT + '.img', 'size': 10 * units.Gi}, {'filename': filename, 'size': 20 * units.Gi}, ] self.assertEqual(wantFiles, gotFiles) def _create_image_helper(self, callback, suffix=''): gotFiles = [] imported_files = [] def fake_image(self, instance, name, image_type=''): class FakeImage(imagebackend.Image): def __init__(self, instance, name, is_block_dev=False): self.path = os.path.join(instance['name'], name) self.is_block_dev = is_block_dev def create_image(self, prepare_template, base, size, *args, **kwargs): pass def resize_image(self, size): pass def cache(self, fetch_func, filename, size=None, *args, **kwargs): gotFiles.append({'filename': filename, 'size': size}) def import_file(self, instance, local_filename, remote_filename): imported_files.append((local_filename, remote_filename)) def snapshot(self, name): pass return FakeImage(instance, name) def fake_none(*args, **kwargs): return def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) # Stop 'libvirt_driver._create_image' touching filesystem self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image", fake_image) instance_ref = self.test_instance instance_ref['image_ref'] = 1 # NOTE(mikal): use this callback to tweak the instance to match # what you're trying to test callback(instance_ref) instance = objects.Instance(**instance_ref) # Turn on some swap to exercise that codepath in _create_image instance.flavor.swap = 500 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none) 
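        # Illustrative sketch, not part of the original tests: the cache
        # filenames asserted in the wantFiles lists above follow a hashing
        # convention - '356a192b7913b04c54574d18c28d46e6395428ab' is the SHA-1
        # of the image ref '1'. The helper below is an assumption used only to
        # show where such names could come from.
        import hashlib

        def _cache_name_sketch(image_ref):
            """Return a SHA-1 based cache filename for an image reference."""
            return hashlib.sha1(str(image_ref).encode('utf-8')).hexdigest()

        # _cache_name_sketch(1) == '356a192b7913b04c54574d18c28d46e6395428ab',
        # and names like 'ephemeral_20_<hash prefix>' similarly embed a short
        # hash of the mkfs command used to build the ephemeral disk, as
        # test_create_image_plain_os_type_set_with_fs computes above.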
self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder, 'make_drive', fake_none) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) drvr._create_image(context, instance, disk_info['mapping'], suffix=suffix) drvr._get_guest_xml(self.context, instance, None, disk_info, image_meta) return gotFiles, imported_files def test_create_image_with_swap(self): def enable_swap(instance_ref): # Turn on some swap to exercise that codepath in _create_image instance_ref['system_metadata']['instance_type_swap'] = 500 gotFiles, _ = self._create_image_helper(enable_swap) wantFiles = [ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab', 'size': 10 * units.Gi}, {'filename': self._EPHEMERAL_20_DEFAULT, 'size': 20 * units.Gi}, {'filename': 'swap_500', 'size': 500 * units.Mi}, ] self.assertEqual(gotFiles, wantFiles) def test_create_image_with_configdrive(self): def enable_configdrive(instance_ref): instance_ref['config_drive'] = 'true' # Ensure that we create a config drive and then import it into the # image backend store _, imported_files = self._create_image_helper(enable_configdrive) self.assertTrue(imported_files[0][0].endswith('/disk.config')) self.assertEqual('disk.config', imported_files[0][1]) def test_create_image_with_configdrive_rescue(self): def enable_configdrive(instance_ref): instance_ref['config_drive'] = 'true' # Ensure that we create a config drive and then import it into the # image backend store _, imported_files = self._create_image_helper(enable_configdrive, suffix='.rescue') self.assertTrue(imported_files[0][0].endswith('/disk.config.rescue')) self.assertEqual('disk.config.rescue', imported_files[0][1]) @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache', side_effect=exception.ImageNotFound(image_id='fake-id')) def test_create_image_not_exist_no_fallback(self, mock_cache): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) self.assertRaises(exception.ImageNotFound, drvr._create_image, self.context, instance, disk_info['mapping']) @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache') def test_create_image_not_exist_fallback(self, mock_cache): def side_effect(fetch_func, filename, size=None, *args, **kwargs): def second_call(fetch_func, filename, size=None, *args, **kwargs): # call copy_from_host ourselves because we mocked image.cache() fetch_func('fake-target', 'fake-max-size') # further calls have no side effect mock_cache.side_effect = None mock_cache.side_effect = second_call # raise an error only the first call raise exception.ImageNotFound(image_id='fake-id') mock_cache.side_effect = side_effect drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) with mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image') as mock_copy: drvr._create_image(self.context, instance, disk_info['mapping'], fallback_from_host='fake-source-host') mock_copy.assert_called_once_with(src='fake-target', dest='fake-target', host='fake-source-host', receive=True) @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache') def test_create_image_resize_snap_backend(self, 
mock_cache): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend instance = objects.Instance(**self.test_instance) instance.task_state = task_states.RESIZE_FINISH image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) with mock.patch.object(drvr.image_backend, 'create_snap') as mock_crt: drvr._create_image(self.context, instance, disk_info['mapping']) mock_crt.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME) @mock.patch.object(utils, 'execute') def test_create_ephemeral_specified_fs(self, mock_exec): self.flags(default_ephemeral_format='ext3') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True, max_size=20, specified_fs='ext4') mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L', 'myVol', '/dev/something', run_as_root=True) def test_create_ephemeral_specified_fs_not_valid(self): CONF.set_override('default_ephemeral_format', 'ext4') ephemerals = [{'device_type': 'disk', 'disk_bus': 'virtio', 'device_name': '/dev/vdb', 'guest_format': 'dummy', 'size': 1}] block_device_info = { 'ephemerals': ephemerals} instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance = objects.Instance(**instance_ref) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) disk_info['mapping'].pop('disk.local') with test.nested( mock.patch.object(utils, 'execute'), mock.patch.object(drvr, 'get_info'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(imagebackend.Image, 'verify_base_size'), mock.patch.object(imagebackend.Image, 'get_disk_size')): self.assertRaises(exception.InvalidBDMFormat, drvr._create_image, context, instance, disk_info['mapping'], block_device_info=block_device_info) def test_create_ephemeral_default(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True, max_size=20) def test_create_ephemeral_with_conf(self): CONF.set_override('default_ephemeral_format', 'ext4') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True) def test_create_ephemeral_with_arbitrary(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND', {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'}) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True) def test_create_ephemeral_with_ext3(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND', {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'}) self.mox.StubOutWithMock(utils, 'execute') 
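        # The _MKFS_COMMAND template stubbed in above is expected to be
        # expanded with the label and target and split into argv, i.e.:
        #   mkfs.ext3 --label myVol /dev/something   (run as root)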
utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True) def test_create_swap_default(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkswap', '/dev/something', run_as_root=False) self.mox.ReplayAll() drvr._create_swap('/dev/something', 1, max_size=20) def test_get_console_output_file(self): fake_libvirt_utils.files['console.log'] = '01234567890' with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 instance = objects.Instance(**instance_ref) console_dir = (os.path.join(tmpdir, instance['name'])) console_log = '%s/console.log' % (console_dir) fake_dom_xml = """ """ % console_log def fake_lookup(id): return FakeVirtDomain(fake_dom_xml) self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) try: prev_max = libvirt_driver.MAX_CONSOLE_BYTES libvirt_driver.MAX_CONSOLE_BYTES = 5 with mock.patch('os.path.exists', return_value=True): output = drvr.get_console_output(self.context, instance) finally: libvirt_driver.MAX_CONSOLE_BYTES = prev_max self.assertEqual('67890', output) def test_get_console_output_file_missing(self): with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 instance = objects.Instance(**instance_ref) console_log = os.path.join(tmpdir, instance['name'], 'non-existent.log') fake_dom_xml = """ """ % console_log def fake_lookup(id): return FakeVirtDomain(fake_dom_xml) self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch('os.path.exists', return_value=False): output = drvr.get_console_output(self.context, instance) self.assertEqual('', output) def test_get_console_output_pty(self): fake_libvirt_utils.files['pty'] = '01234567890' with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 instance = objects.Instance(**instance_ref) console_dir = (os.path.join(tmpdir, instance['name'])) pty_file = '%s/fake_pty' % (console_dir) fake_dom_xml = """ """ % pty_file def fake_lookup(id): return FakeVirtDomain(fake_dom_xml) def _fake_flush(self, fake_pty): return 'foo' def _fake_append_to_file(self, data, fpath): return 'pty' self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) try: prev_max = libvirt_driver.MAX_CONSOLE_BYTES libvirt_driver.MAX_CONSOLE_BYTES = 5 output = drvr.get_console_output(self.context, instance) finally: libvirt_driver.MAX_CONSOLE_BYTES = prev_max self.assertEqual('67890', output) @mock.patch('nova.virt.libvirt.host.Host.get_domain') @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_console_output_not_available(self, mock_get_xml, get_domain): xml = """ """ mock_get_xml.return_value = xml get_domain.return_value = mock.MagicMock() instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) 
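        # The stubbed guest XML gives get_console_output() no console source
        # it knows how to read, so ConsoleNotAvailable is the expected result.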
self.assertRaises(exception.ConsoleNotAvailable, drvr.get_console_output, self.context, instance) def test_get_host_ip_addr(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ip = drvr.get_host_ip_addr() self.assertEqual(ip, CONF.my_ip) @mock.patch.object(libvirt_driver.LOG, 'warn') @mock.patch('nova.compute.utils.get_machine_ips') def test_get_host_ip_addr_failure(self, mock_ips, mock_log): mock_ips.return_value = ['8.8.8.8', '75.75.75.75'] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.get_host_ip_addr() mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was ' u'not found on any of the ' u'interfaces: %(ifaces)s', {'ifaces': '8.8.8.8, 75.75.75.75', 'my_ip': mock.ANY}) def test_conn_event_handler(self): self.mox.UnsetStubs() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) service_mock = mock.MagicMock() service_mock.disabled.return_value = False with test.nested( mock.patch.object(drvr._host, "_connect", side_effect=fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "Failed to connect to host", error_code= fakelibvirt.VIR_ERR_INTERNAL_ERROR)), mock.patch.object(drvr._host, "_init_events", return_value=None), mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): # verify that the driver registers for the close callback # and re-connects after receiving the callback self.assertRaises(exception.HypervisorUnavailable, drvr.init_host, "wibble") self.assertTrue(service_mock.disabled) def test_command_with_broken_connection(self): self.mox.UnsetStubs() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) service_mock = mock.MagicMock() service_mock.disabled.return_value = False with test.nested( mock.patch.object(drvr._host, "_connect", side_effect=fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "Failed to connect to host", error_code= fakelibvirt.VIR_ERR_INTERNAL_ERROR)), mock.patch.object(drvr._host, "_init_events", return_value=None), mock.patch.object(host.Host, "has_min_version", return_value=True), mock.patch.object(drvr, "_do_quality_warnings", return_value=None), mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): drvr.init_host("wibble") self.assertRaises(exception.HypervisorUnavailable, drvr.get_num_instances) self.assertTrue(service_mock.disabled) def test_service_resume_after_broken_connection(self): self.mox.UnsetStubs() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) service_mock = mock.MagicMock() service_mock.disabled.return_value = True with test.nested( mock.patch.object(drvr._host, "_connect", return_value=mock.MagicMock()), mock.patch.object(drvr._host, "_init_events", return_value=None), mock.patch.object(host.Host, "has_min_version", return_value=True), mock.patch.object(drvr, "_do_quality_warnings", return_value=None), mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): drvr.init_host("wibble") drvr.get_num_instances() self.assertTrue(not service_mock.disabled and service_mock.disabled_reason is None) @mock.patch.object(objects.Instance, 'save') def test_immediate_delete(self, mock_save): def fake_get_domain(instance): raise exception.InstanceNotFound(instance_id=instance.uuid) def fake_delete_instance_files(instance): pass drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, 
**self.test_instance) drvr.destroy(self.context, instance, {}) mock_save.assert_called_once_with() @mock.patch.object(objects.Instance, 'get_by_uuid') @mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True) @mock.patch.object(objects.Instance, 'save', autospec=True) @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files') @mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume') @mock.patch.object(driver, 'block_device_info_get_mapping') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping, mock_disconnect_volume, mock_delete_instance_files, mock_destroy, mock_inst_save, mock_inst_obj_load_attr, mock_get_by_uuid, volume_fail=False): instance = objects.Instance(self.context, **self.test_instance) vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} mock_mapping.return_value = vol['block_device_mapping'] mock_delete_instance_files.return_value = True mock_get_by_uuid.return_value = instance if volume_fail: mock_disconnect_volume.return_value = ( exception.VolumeNotFound('vol')) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.destroy(self.context, instance, [], vol) def test_destroy_removes_disk(self): self._test_destroy_removes_disk(volume_fail=False) def test_destroy_removes_disk_volume_fails(self): self._test_destroy_removes_disk(volume_fail=True) @mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs') @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy, mock_unplug_vifs): instance = fake_instance.fake_instance_obj( None, name='instancename', id=1, uuid='875a8070-d0b9-4949-8b31-104d125c9a64') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.destroy(self.context, instance, [], None, False) @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup') @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container') @mock.patch.object(host.Host, 'get_domain') def test_destroy_lxc_calls_teardown_container(self, mock_get_domain, mock_teardown_container, mock_cleanup): self.flags(virt_type='lxc', group='libvirt') fake_domain = FakeVirtDomain() def destroy_side_effect(*args, **kwargs): fake_domain._info[0] = power_state.SHUTDOWN with mock.patch.object(fake_domain, 'destroy', side_effect=destroy_side_effect) as mock_domain_destroy: mock_get_domain.return_value = fake_domain instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) network_info = [] drvr.destroy(self.context, instance, network_info, None, False) mock_get_domain.assert_has_calls([mock.call(instance), mock.call(instance)]) mock_domain_destroy.assert_called_once_with() mock_teardown_container.assert_called_once_with(instance) mock_cleanup.assert_called_once_with(self.context, instance, network_info, None, False, None) @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup') @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container') @mock.patch.object(host.Host, 'get_domain') def test_destroy_lxc_calls_teardown_container_when_no_domain(self, mock_get_domain, mock_teardown_container, mock_cleanup): self.flags(virt_type='lxc', group='libvirt') instance = objects.Instance(**self.test_instance) inf_exception = 
exception.InstanceNotFound(instance_id=instance.uuid) mock_get_domain.side_effect = inf_exception drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) network_info = [] drvr.destroy(self.context, instance, network_info, None, False) mock_get_domain.assert_has_calls([mock.call(instance), mock.call(instance)]) mock_teardown_container.assert_called_once_with(instance) mock_cleanup.assert_called_once_with(self.context, instance, network_info, None, False, None) def test_reboot_different_ids(self): class FakeLoopingCall(object): def start(self, *a, **k): return self def wait(self): return None self.flags(wait_soft_reboot_seconds=1, group='libvirt') info_tuple = ('fake', 'fake', 'fake', 'also_fake') self.reboot_create_called = False # Mock domain mock_domain = self.mox.CreateMock(fakelibvirt.virDomain) mock_domain.info().AndReturn( (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple) mock_domain.ID().AndReturn('some_fake_id') mock_domain.ID().AndReturn('some_fake_id') mock_domain.shutdown() mock_domain.info().AndReturn( (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple) mock_domain.ID().AndReturn('some_other_fake_id') mock_domain.ID().AndReturn('some_other_fake_id') self.mox.ReplayAll() def fake_get_domain(instance): return mock_domain def fake_create_domain(**kwargs): self.reboot_create_called = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, '_create_domain', fake_create_domain) self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall', lambda *a, **k: FakeLoopingCall()) self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: []) drvr.reboot(None, instance, [], 'SOFT') self.assertTrue(self.reboot_create_called) @mock.patch.object(pci_manager, 'get_instance_pci_devs') @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(greenthread, 'sleep') @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot') @mock.patch.object(host.Host, 'get_domain') def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot, mock_sleep, mock_loopingcall, mock_get_instance_pci_devs): class FakeLoopingCall(object): def start(self, *a, **k): return self def wait(self): return None self.flags(wait_soft_reboot_seconds=1, group='libvirt') info_tuple = ('fake', 'fake', 'fake', 'also_fake') self.reboot_hard_reboot_called = False # Mock domain mock_domain = mock.Mock(fakelibvirt.virDomain) return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple, (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple] mock_domain.info.side_effect = return_values mock_domain.ID.return_value = 'some_fake_id' mock_domain.shutdown.side_effect = mock.Mock() def fake_hard_reboot(*args, **kwargs): self.reboot_hard_reboot_called = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_get_domain.return_value = mock_domain mock_hard_reboot.side_effect = fake_hard_reboot mock_loopingcall.return_value = FakeLoopingCall() mock_get_instance_pci_devs.return_value = [] drvr.reboot(None, instance, [], 'SOFT') self.assertTrue(self.reboot_hard_reboot_called) @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot') @mock.patch.object(host.Host, 'get_domain') def test_soft_reboot_libvirt_exception(self, mock_get_domain, mock_hard_reboot): # Tests that a hard reboot is performed when a soft reboot results # in raising a libvirtError. 
info_tuple = ('fake', 'fake', 'fake', 'also_fake') # setup mocks mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.info.return_value = ( (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple) mock_virDomain.ID.return_value = 'some_fake_id' mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) context = None instance = objects.Instance(**self.test_instance) network_info = [] mock_get_domain.return_value = mock_virDomain drvr.reboot(context, instance, network_info, 'SOFT') @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot') @mock.patch.object(host.Host, 'get_domain') def _test_resume_state_on_host_boot_with_state(self, state, mock_get_domain, mock_hard_reboot): mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.info.return_value = ([state, None, None, None, None]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.return_value = mock_virDomain instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) drvr.resume_state_on_host_boot(self.context, instance, network_info, block_device_info=None) ignored_states = (power_state.RUNNING, power_state.SUSPENDED, power_state.NOSTATE, power_state.PAUSED) self.assertEqual(mock_hard_reboot.called, state not in ignored_states) def test_resume_state_on_host_boot_with_running_state(self): self._test_resume_state_on_host_boot_with_state(power_state.RUNNING) def test_resume_state_on_host_boot_with_suspended_state(self): self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED) def test_resume_state_on_host_boot_with_paused_state(self): self._test_resume_state_on_host_boot_with_state(power_state.PAUSED) def test_resume_state_on_host_boot_with_nostate(self): self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE) def test_resume_state_on_host_boot_with_shutdown_state(self): self._test_resume_state_on_host_boot_with_state(power_state.RUNNING) def test_resume_state_on_host_boot_with_crashed_state(self): self._test_resume_state_on_host_boot_with_state(power_state.CRASHED) @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot') @mock.patch.object(host.Host, 'get_domain') def test_resume_state_on_host_boot_with_instance_not_found_on_driver( self, mock_get_domain, mock_hard_reboot): instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.side_effect = exception.InstanceNotFound( instance_id='fake') drvr.resume_state_on_host_boot(self.context, instance, network_info=[], block_device_info=None) mock_hard_reboot.assert_called_once_with(self.context, instance, [], None) @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info') @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info') @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy') def test_hard_reboot(self, mock_destroy, mock_get_disk_info, mock_get_instance_disk_info, mock_get_guest_xml, mock_create_images_and_backing, mock_create_domain_and_network, mock_get_info): self.context.auth_token = True # any non-None value will suffice instance = objects.Instance(**self.test_instance) instance_path = libvirt_utils.get_instance_path(instance) network_info = 
_fake_network_info(self, 1) block_device_info = None dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN), hardware.InstanceInfo(state=power_state.RUNNING)] mock_get_info.side_effect = return_values backing_disk_info = [{"virt_disk_size": 2}] mock_get_disk_info.return_value = mock.sentinel.disk_info mock_get_guest_xml.return_value = dummyxml mock_get_instance_disk_info.return_value = backing_disk_info drvr._hard_reboot(self.context, instance, network_info, block_device_info) # make sure that _create_images_and_backing is passed the disk_info # returned from _get_instance_disk_info and not the one that is in # scope from blockinfo.get_disk_info mock_create_images_and_backing.assert_called_once_with(self.context, instance, instance_path, backing_disk_info) # make sure that _create_domain_and_network is passed the disk_info # returned from blockinfo.get_disk_info and not the one that's # returned from _get_instance_disk_info mock_create_domain_and_network.assert_called_once_with(self.context, dummyxml, instance, network_info, mock.sentinel.disk_info, block_device_info=block_device_info, reboot=True, vifs_already_plugged=True) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall') @mock.patch('nova.pci.manager.get_instance_pci_devs') @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info') @mock.patch('nova.virt.libvirt.utils.write_to_file') @mock.patch('nova.virt.libvirt.utils.get_instance_path') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config') @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info') @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy') def test_hard_reboot_does_not_call_glance_show(self, mock_destroy, mock_get_disk_info, mock_get_guest_config, mock_get_instance_path, mock_write_to_file, mock_get_instance_disk_info, mock_create_images_and_backing, mock_create_domand_and_network, mock_prepare_pci_devices_for_use, mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree): """For a hard reboot, we shouldn't need an additional call to glance to get the image metadata. This is important for automatically spinning up instances on a host-reboot, since we won't have a user request context that'll allow the Glance request to go through. We have to rely on the cached image metadata, instead. 
https://bugs.launchpad.net/nova/+bug/1339386 """ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) network_info = mock.MagicMock() block_device_info = mock.MagicMock() mock_get_disk_info.return_value = {} mock_get_guest_config.return_value = mock.MagicMock() mock_get_instance_path.return_value = '/foo' mock_looping_call.return_value = mock.MagicMock() drvr._image_api = mock.MagicMock() drvr._hard_reboot(self.context, instance, network_info, block_device_info) self.assertFalse(drvr._image_api.get.called) mock_ensure_tree.assert_called_once_with('/foo') def test_suspend(self): guest = libvirt_guest.Guest(FakeVirtDomain(id=1)) dom = guest._domain instance = objects.Instance(**self.test_instance) instance.ephemeral_key_uuid = None conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @mock.patch.object(dmcrypt, 'delete_volume') @mock.patch.object(conn, '_get_instance_disk_info', return_value=[]) @mock.patch.object(conn, '_detach_sriov_ports') @mock.patch.object(conn, '_detach_pci_devices') @mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value='pci devs') @mock.patch.object(conn._host, 'get_guest', return_value=guest) def suspend(mock_get_guest, mock_get_instance_pci_devs, mock_detach_pci_devices, mock_detach_sriov_ports, mock_get_instance_disk_info, mock_delete_volume): mock_managedSave = mock.Mock() dom.managedSave = mock_managedSave conn.suspend(self.context, instance) mock_managedSave.assert_called_once_with(0) self.assertFalse(mock_get_instance_disk_info.called) mock_delete_volume.assert_has_calls([mock.call(disk['path']) for disk in mock_get_instance_disk_info.return_value], False) suspend() @mock.patch.object(time, 'sleep') @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain') @mock.patch.object(host.Host, 'get_domain') def _test_clean_shutdown(self, mock_get_domain, mock_create_domain, mock_sleep, seconds_to_shutdown, timeout, retry_interval, shutdown_attempts, succeeds): info_tuple = ('fake', 'fake', 'fake', 'also_fake') shutdown_count = [] # Mock domain mock_domain = mock.Mock(fakelibvirt.virDomain) return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple] return_shutdowns = [shutdown_count.append("shutdown")] retry_countdown = retry_interval for x in range(min(seconds_to_shutdown, timeout)): return_infos.append( (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple) if retry_countdown == 0: return_shutdowns.append(shutdown_count.append("shutdown")) retry_countdown = retry_interval else: retry_countdown -= 1 if seconds_to_shutdown < timeout: return_infos.append( (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple) mock_domain.info.side_effect = return_infos mock_domain.shutdown.side_effect = return_shutdowns def fake_create_domain(**kwargs): self.reboot_create_called = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_get_domain.return_value = mock_domain mock_create_domain.side_effect = fake_create_domain result = drvr._clean_shutdown(instance, timeout, retry_interval) self.assertEqual(succeeds, result) self.assertEqual(shutdown_attempts, len(shutdown_count)) def test_clean_shutdown_first_time(self): self._test_clean_shutdown(seconds_to_shutdown=2, timeout=5, retry_interval=3, shutdown_attempts=1, succeeds=True) def test_clean_shutdown_with_retry(self): self._test_clean_shutdown(seconds_to_shutdown=4, timeout=5, retry_interval=3, shutdown_attempts=2, succeeds=True) def test_clean_shutdown_failure(self): 
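        # seconds_to_shutdown (6) exceeds the timeout (5), so the guest never
        # reports SHUTDOWN; after the initial attempt plus one retry
        # (shutdown_attempts=2) _clean_shutdown gives up and returns False.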
self._test_clean_shutdown(seconds_to_shutdown=6, timeout=5, retry_interval=3, shutdown_attempts=2, succeeds=False) def test_clean_shutdown_no_wait(self): self._test_clean_shutdown(seconds_to_shutdown=6, timeout=0, retry_interval=3, shutdown_attempts=1, succeeds=False) @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags') @mock.patch.object(FakeVirtDomain, 'ID', return_value=1) @mock.patch.object(utils, 'get_image_from_system_metadata', return_value=None) def test_attach_sriov_ports(self, mock_get_image_metadata, mock_ID, mock_attachDevice): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT guest = libvirt_guest.Guest(FakeVirtDomain()) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._attach_sriov_ports(self.context, instance, guest, network_info) mock_get_image_metadata.assert_called_once_with( instance.system_metadata) self.assertTrue(mock_attachDevice.called) @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags') @mock.patch.object(FakeVirtDomain, 'ID', return_value=1) @mock.patch.object(utils, 'get_image_from_system_metadata', return_value=None) def test_attach_sriov_ports_with_info_cache(self, mock_get_image_metadata, mock_ID, mock_attachDevice): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT instance.info_cache = objects.InstanceInfoCache( network_info=network_info) guest = libvirt_guest.Guest(FakeVirtDomain()) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._attach_sriov_ports(self.context, instance, guest, None) mock_get_image_metadata.assert_called_once_with( instance.system_metadata) self.assertTrue(mock_attachDevice.called) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def _test_detach_sriov_ports(self, mock_has_min_version, vif_type): instance = objects.Instance(**self.test_instance) expeted_pci_slot = "0000:00:00.0" network_info = _fake_network_info(self, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT # some more adjustments for the fake network_info so that # the correct get_config function will be executed (vif's # get_config_hw_veb - which is according to the real SRIOV vif) # and most importantly the pci_slot which is translated to # cfg.source_dev, then to PciDevice.address and sent to # _detach_pci_devices network_info[0]['profile'] = dict(pci_slot=expeted_pci_slot) network_info[0]['type'] = vif_type network_info[0]['details'] = dict(vlan="2145") instance.info_cache = objects.InstanceInfoCache( network_info=network_info) # fill the pci_devices of the instance so that # pci_manager.get_instance_pci_devs will not return an empty list # which will eventually fail the assertion for detachDeviceFlags expected_pci_device_obj = ( objects.PciDevice(address=expeted_pci_slot, request_id=None)) instance.pci_devices = objects.PciDeviceList() instance.pci_devices.objects = [expected_pci_device_obj] domain = FakeVirtDomain() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest(domain) with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci: drvr._detach_sriov_ports(self.context, instance, guest) mock_detach_pci.assert_called_once_with( guest, [expected_pci_device_obj]) def test_detach_sriov_ports_interface_interface_hostdev(self): # Note: test detach_sriov_ports method for vif with config # LibvirtConfigGuestInterface 
self._test_detach_sriov_ports(vif_type="hw_veb") def test_detach_sriov_ports_interface_pci_hostdev(self): # Note: test detach_sriov_ports method for vif with config # LibvirtConfigGuestHostdevPCI self._test_detach_sriov_ports(vif_type="ib_hostdev") @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags') def test_detach_duplicate_mac_sriov_ports(self, mock_detachDeviceFlags, mock_has_min_version): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 2) for network_info_inst in network_info: network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT network_info_inst['type'] = "hw_veb" network_info_inst['details'] = dict(vlan="2145") network_info_inst['address'] = "fa:16:3e:96:2a:48" network_info[0]['profile'] = dict(pci_slot="0000:00:00.0") network_info[1]['profile'] = dict(pci_slot="0000:00:00.1") instance.info_cache = objects.InstanceInfoCache( network_info=network_info) # fill the pci_devices of the instance so that # pci_manager.get_instance_pci_devs will not return an empty list # which will eventually fail the assertion for detachDeviceFlags instance.pci_devices = objects.PciDeviceList() instance.pci_devices.objects = [ objects.PciDevice(address='0000:00:00.0', request_id=None), objects.PciDevice(address='0000:00:00.1', request_id=None) ] domain = FakeVirtDomain() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest(domain) drvr._detach_sriov_ports(self.context, instance, guest) expected_xml = [ ('\n' ' \n' '
\n' ' \n' '\n'), ('\n' ' \n' '
\n' ' \n' '\n') ] mock_detachDeviceFlags.has_calls([ mock.call(expected_xml[0], flags=1), mock.call(expected_xml[1], flags=1) ]) def test_resume(self): dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) block_device_info = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest('fake_dom') with test.nested( mock.patch.object(drvr, '_get_existing_domain_xml', return_value=dummyxml), mock.patch.object(drvr, '_create_domain_and_network', return_value=guest), mock.patch.object(drvr, '_attach_pci_devices'), mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value='fake_pci_devs'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(blockinfo, 'get_disk_info'), ) as (_get_existing_domain_xml, _create_domain_and_network, _attach_pci_devices, get_instance_pci_devs, get_image_metadata, get_disk_info): get_image_metadata.return_value = {'bar': 234} disk_info = {'foo': 123} get_disk_info.return_value = disk_info drvr.resume(self.context, instance, network_info, block_device_info) _get_existing_domain_xml.assert_has_calls([mock.call(instance, network_info, block_device_info)]) _create_domain_and_network.assert_has_calls([mock.call( self.context, dummyxml, instance, network_info, disk_info, block_device_info=block_device_info, vifs_already_plugged=True)]) _attach_pci_devices.assert_has_calls([mock.call(guest, 'fake_pci_devs')]) @mock.patch.object(host.Host, 'get_domain') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files') @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines(self, mock_save, mock_delete_instance_files, mock_get_info, mock_get_domain): dom_mock = mock.MagicMock() dom_mock.undefineFlags.return_value = 1 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.return_value = dom_mock mock_get_info.return_value = hardware.InstanceInfo( state=power_state.SHUTDOWN, id=-1) mock_delete_instance_files.return_value = None instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(rbd_utils, 'RBDDriver') def test_cleanup_rbd(self, mock_driver): driver = mock_driver.return_value driver.cleanup_volumes = mock.Mock() fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr._cleanup_rbd(fake_instance) driver.cleanup_volumes.assert_called_once_with(fake_instance) @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_undefine_flags(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err')) mock.ID().AndReturn(123) mock.undefine() self.mox.ReplayAll() def fake_get_domain(instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() 
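    # The next two tests mock the progressive fallback in _undefine_domain:
    # undefineFlags(1) is attempted first; when the binding lacks that method
    # (AttributeError) any managed save image found via hasManagedSaveImage(0)
    # is dropped with managedSaveRemove(0) before falling back to a plain
    # undefine().  (Call order inferred from the mox expectations below.)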
@mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(AttributeError()) mock.hasManagedSaveImage(0).AndReturn(True) mock.managedSaveRemove(0) mock.undefine() self.mox.ReplayAll() def fake_get_domain(instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(AttributeError()) mock.hasManagedSaveImage(0).AndRaise(AttributeError()) mock.undefine() self.mox.ReplayAll() def fake_get_domain(self, instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() def test_destroy_timed_out(self): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out")) self.mox.ReplayAll() def fake_get_domain(self, instance): return mock def fake_get_error_code(self): return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code', fake_get_error_code) instance = objects.Instance(**self.test_instance) self.assertRaises(exception.InstancePowerOffFailure, drvr.destroy, self.context, instance, []) def test_private_destroy_not_found(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "No such domain", error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy().AndRaise(ex) mock.info().AndRaise(ex) mock.UUIDString() self.mox.ReplayAll() def fake_get_domain(instance): return mock drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) instance = objects.Instance(**self.test_instance) # NOTE(vish): verifies destroy doesn't raise if the instance disappears drvr._destroy(instance) def test_private_destroy_lxc_processes_refused_to_die(self): self.flags(virt_type='lxc', group='libvirt') ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "", error_message="internal error: Some processes refused to die", error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \ 
mock.patch.object(conn, 'get_info') as mock_get_info: mock_domain = mock.MagicMock() mock_domain.ID.return_value = 1 mock_get_domain.return_value = mock_domain mock_domain.destroy.side_effect = ex mock_info = mock.MagicMock() mock_info.id = 1 mock_info.state = power_state.SHUTDOWN mock_get_info.return_value = mock_info instance = objects.Instance(**self.test_instance) conn._destroy(instance) def test_private_destroy_processes_refused_to_die_still_raises(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "", error_message="internal error: Some processes refused to die", error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(conn._host, 'get_domain') as mock_get_domain: mock_domain = mock.MagicMock() mock_domain.ID.return_value = 1 mock_get_domain.return_value = mock_domain mock_domain.destroy.side_effect = ex instance = objects.Instance(**self.test_instance) self.assertRaises(fakelibvirt.libvirtError, conn._destroy, instance) def test_private_destroy_ebusy_timeout(self): # Tests that _destroy will retry 3 times to destroy the guest when an # EBUSY is raised, but eventually times out and raises the libvirtError ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, ("Failed to terminate process 26425 with SIGKILL: " "Device or resource busy"), error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR, int1=errno.EBUSY) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.poweroff = mock.Mock(side_effect=ex) instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(fakelibvirt.libvirtError, drvr._destroy, instance) self.assertEqual(3, mock_guest.poweroff.call_count) def test_private_destroy_ebusy_multiple_attempt_ok(self): # Tests that the _destroy attempt loop is broken when EBUSY is no # longer raised. ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, ("Failed to terminate process 26425 with SIGKILL: " "Device or resource busy"), error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR, int1=errno.EBUSY) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.poweroff = mock.Mock(side_effect=[ex, None]) inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1) instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr._host, 'get_guest', return_value=mock_guest): with mock.patch.object(drvr, 'get_info', return_value=inst_info): drvr._destroy(instance) self.assertEqual(2, mock_guest.poweroff.call_count) def test_undefine_domain_with_not_found_instance(self): def fake_get_domain(self, instance): raise exception.InstanceNotFound(instance_id=instance.uuid) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code") self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) # NOTE(wenjianhn): verifies undefine doesn't raise if the # instance disappears drvr._undefine_domain(instance) @mock.patch.object(host.Host, "list_instance_domains") @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid") @mock.patch.object(objects.InstanceList, "get_by_filters") def test_disk_over_committed_size_total(self, mock_get, mock_bdms, mock_list): # Ensure destroy calls managedSaveRemove for saved instance. 
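        # This test verifies _get_disk_over_committed_size_total(): the
        # over_committed_disk_size reported for each instance domain is
        # summed (10653532160 + 0 here).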
class DiagFakeDomain(object): def __init__(self, name): self._name = name self._uuid = str(uuid.uuid4()) def ID(self): return 1 def name(self): return self._name def UUIDString(self): return self._uuid def XMLDesc(self, flags): return "" instance_domains = [ DiagFakeDomain("instance0000001"), DiagFakeDomain("instance0000002")] mock_list.return_value = instance_domains drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_disks = {'instance0000001': [{'type': 'qcow2', 'path': '/somepath/disk1', 'virt_disk_size': '10737418240', 'backing_file': '/somepath/disk1', 'disk_size': '83886080', 'over_committed_disk_size': '10653532160'}], 'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2', 'virt_disk_size': '0', 'backing_file': '/somepath/disk2', 'disk_size': '10737418240', 'over_committed_disk_size': '0'}]} def get_info(instance_name, xml, **kwargs): return fake_disks.get(instance_name) instance_uuids = [dom.UUIDString() for dom in instance_domains] instances = [objects.Instance( uuid=instance_uuids[0], root_device_name='/dev/vda'), objects.Instance( uuid=instance_uuids[1], root_device_name='/dev/vdb') ] mock_get.return_value = instances with mock.patch.object(drvr, "_get_instance_disk_info") as mock_info: mock_info.side_effect = get_info result = drvr._get_disk_over_committed_size_total() self.assertEqual(result, 10653532160) mock_list.assert_called_once_with() self.assertEqual(2, mock_info.call_count) filters = {'uuid': instance_uuids} mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains") @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid") @mock.patch.object(objects.InstanceList, "get_by_filters") def test_disk_over_committed_size_total_eperm(self, mock_get, mock_bdms, mock_list): # Ensure destroy calls managedSaveRemove for saved instance. 
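        # Variant of the test above: reading the first instance's disk info
        # fails with EACCES (Permission denied), so that instance is skipped
        # and only the second instance's 21474836480 bytes are counted.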
class DiagFakeDomain(object): def __init__(self, name): self._name = name self._uuid = str(uuid.uuid4()) def ID(self): return 1 def name(self): return self._name def UUIDString(self): return self._uuid def XMLDesc(self, flags): return "" instance_domains = [ DiagFakeDomain("instance0000001"), DiagFakeDomain("instance0000002")] mock_list.return_value = instance_domains drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_disks = {'instance0000001': [{'type': 'qcow2', 'path': '/somepath/disk1', 'virt_disk_size': '10737418240', 'backing_file': '/somepath/disk1', 'disk_size': '83886080', 'over_committed_disk_size': '10653532160'}], 'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2', 'virt_disk_size': '0', 'backing_file': '/somepath/disk2', 'disk_size': '10737418240', 'over_committed_disk_size': '21474836480'}]} def side_effect(name, dom, block_device_info): if name == 'instance0000001': self.assertEqual('/dev/vda', block_device_info['root_device_name']) raise OSError(errno.EACCES, 'Permission denied') if name == 'instance0000002': self.assertEqual('/dev/vdb', block_device_info['root_device_name']) return fake_disks.get(name) get_disk_info = mock.Mock() get_disk_info.side_effect = side_effect drvr._get_instance_disk_info = get_disk_info instance_uuids = [dom.UUIDString() for dom in instance_domains] instances = [objects.Instance( uuid=instance_uuids[0], root_device_name='/dev/vda'), objects.Instance( uuid=instance_uuids[1], root_device_name='/dev/vdb') ] mock_get.return_value = instances result = drvr._get_disk_over_committed_size_total() self.assertEqual(21474836480, result) mock_list.assert_called_once_with() self.assertEqual(2, get_disk_info.call_count) filters = {'uuid': instance_uuids} mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains", return_value=[mock.MagicMock(name='foo')]) @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info", side_effect=exception.VolumeBDMPathNotFound(path='bar')) @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid") @mock.patch.object(objects.InstanceList, "get_by_filters") def test_disk_over_committed_size_total_bdm_not_found(self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains): # Tests that we handle VolumeBDMPathNotFound gracefully. 
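        # A VolumeBDMPathNotFound from _get_instance_disk_info is handled
        # gracefully; the affected domain contributes nothing, so the
        # over-committed total here is 0.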
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_disk_over_committed_size_total()) def test_cpu_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigCPU() cpu.model = "Opteron_G4" cpu.vendor = "AMD" cpu.arch = arch.X86_64 cpu.cells = 1 cpu.cores = 2 cpu.threads = 1 cpu.sockets = 4 cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic")) cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow")) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.X86_64 guest.domtype = ["kvm"] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.I686 guest.domtype = ["kvm"] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = {"vendor": "AMD", "features": set(["extapic", "3dnow"]), "model": "Opteron_G4", "arch": arch.X86_64, "topology": {"cells": 1, "cores": 2, "threads": 1, "sockets": 4}} got = drvr._get_cpu_info() self.assertEqual(want, got) def test_get_pcidev_info(self): def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object( fakelibvirt.Connection, 'getLibVersion') as mock_lib_version: mock_lib_version.return_value = ( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION) - 1) actualvf = drvr._get_pcidev_info("pci_0000_04_00_3") expect_vf = { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1521', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_10_7") expect_vf = { "dev_id": "pci_0000_04_10_7", "address": "0000:04:10.7", "product_id": '1520', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "parent_addr": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_11_7") expect_vf = { "dev_id": "pci_0000_04_11_7", "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "parent_addr": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) with mock.patch.object( pci_utils, 'is_physical_function', return_value=True): actualvf = drvr._get_pcidev_info("pci_0000_04_00_1") expect_vf = { "dev_id": "pci_0000_04_00_1", "address": "0000:04:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) with mock.patch.object( pci_utils, 'is_physical_function', return_value=False): actualvf = drvr._get_pcidev_info("pci_0000_04_00_1") expect_vf = { "dev_id": "pci_0000_04_00_1", "address": "0000:04:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.STANDARD, } self.assertEqual(expect_vf, actualvf) with mock.patch.object( fakelibvirt.Connection, 'getLibVersion') as mock_lib_version: 
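            # Re-run the lookups with libvirt reporting exactly
            # MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION; pci_0000_03_00_0 and
            # pci_0000_03_00_1 should now be classified as SRIOV_PF.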
mock_lib_version.return_value = ( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION)) actualvf = drvr._get_pcidev_info("pci_0000_03_00_0") expect_vf = { "dev_id": "pci_0000_03_00_0", "address": "0000:03:00.0", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_03_00_1") expect_vf = { "dev_id": "pci_0000_03_00_1", "address": "0000:03:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) def test_list_devices_not_supported(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Handle just the NO_SUPPORT error not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) with mock.patch.object(drvr._conn, 'listDevices', side_effect=not_supported_exc): self.assertEqual('[]', drvr._get_pci_passthrough_devices()) # We cache not supported status to avoid emitting too many logging # messages. Clear this value to test the other exception case. del drvr._list_devices_supported # Other errors should not be caught other_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'other exc', error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) with mock.patch.object(drvr._conn, 'listDevices', side_effect=other_exc): self.assertRaises(fakelibvirt.libvirtError, drvr._get_pci_passthrough_devices) def test_get_pci_passthrough_devices(self): def fakelistDevices(caps, fakeargs=0): return ['pci_0000_04_00_3', 'pci_0000_04_10_7', 'pci_0000_04_11_7'] self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn') libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) actjson = drvr._get_pci_passthrough_devices() expectvfs = [ { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "vendor_id": '8086', "dev_type": fields.PciDeviceType.SRIOV_PF, "phys_function": None, "numa_node": None}, { "dev_id": "pci_0000_04_10_7", "domain": 0, "address": "0000:04:10.7", "product_id": '1520', "vendor_id": '8086', "numa_node": None, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')]}, { "dev_id": "pci_0000_04_11_7", "domain": 0, "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')], } ] actualvfs = jsonutils.loads(actjson) for dev in range(len(actualvfs)): for key in actualvfs[dev].keys(): if key not in ['phys_function', 'virt_functions', 'label']: self.assertEqual(expectvfs[dev][key], actualvfs[dev][key]) def _fake_caps_numa_topology(self, cells_per_host=4, sockets_per_cell=1, cores_per_socket=1, threads_per_core=2, kb_mem=1048576): # Generate mempages list per cell cell_mempages = list() for cellid in range(cells_per_host): mempages_0 = vconfig.LibvirtConfigCapsNUMAPages() mempages_0.size = 4 mempages_0.total = 1024 * cellid mempages_1 = vconfig.LibvirtConfigCapsNUMAPages() 
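            # Second page size per cell: 2 MiB (2048 KiB) pages, with a total
            # equal to the cell id, so cell 0 advertises no huge pages.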
mempages_1.size = 2048 mempages_1.total = 0 + cellid cell_mempages.append([mempages_0, mempages_1]) topology = fakelibvirt.HostInfo._gen_numa_topology(cells_per_host, sockets_per_cell, cores_per_socket, threads_per_core, kb_mem=kb_mem, numa_mempages_list=cell_mempages) return topology def _test_get_host_numa_topology(self, mempages): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) expected_topo_dict = {'cells': [ {'cpus': '0,1', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 0}, {'cpus': '3', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 1}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 2}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 3}]} with test.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set([0, 1, 2, 3, 6])), ): got_topo = drvr._get_host_numa_topology() got_topo_dict = got_topo._to_dict() self.assertThat( expected_topo_dict, matchers.DictMatches(got_topo_dict)) if mempages: # cells 0 self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[0].total) self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[1].total) # cells 1 self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb) self.assertEqual(1024, got_topo.cells[1].mempages[0].total) self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb) self.assertEqual(1, got_topo.cells[1].mempages[1].total) else: self.assertEqual([], got_topo.cells[0].mempages) self.assertEqual([], got_topo.cells[1].mempages) self.assertEqual(expected_topo_dict, got_topo_dict) self.assertEqual(set([]), got_topo.cells[0].pinned_cpus) self.assertEqual(set([]), got_topo.cells[1].pinned_cpus) self.assertEqual(set([]), got_topo.cells[2].pinned_cpus) self.assertEqual(set([]), got_topo.cells[3].pinned_cpus) self.assertEqual([set([0, 1])], got_topo.cells[0].siblings) self.assertEqual([], got_topo.cells[1].siblings) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_host_numa_topology(self, mock_version): self._test_get_host_numa_topology(mempages=True) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_no_mempages(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') mock_lib_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1 mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self._test_get_host_numa_topology(mempages=False) def test_get_host_numa_topology_empty(self): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), 
mock.patch.object(host.Host, "get_capabilities", return_value=caps) ) as (has_min_version, get_caps): self.assertIsNone(drvr._get_host_numa_topology()) self.assertEqual(2, get_caps.call_count) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_old_version(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1 mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self.assertIsNone(drvr._get_host_numa_topology()) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_xen(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_XEN self.assertIsNone(drvr._get_host_numa_topology()) def test_diagnostic_vcpus_exception(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): raise fakelibvirt.libvirtError('vcpus missing') def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 
'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_blockstats_exception(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): raise fakelibvirt.libvirtError('blockStats missing') def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_interfacestats_exception(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): raise fakelibvirt.libvirtError('interfaceStat missing') def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) 
self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_memorystats_exception(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): raise fakelibvirt.libvirtError('memoryStats missing') def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_full(self): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return 
(4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) @mock.patch.object(host.Host, 'get_domain') def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain): xml = """ """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self): return DiagFakeDomain() mock_get_domain.side_effect = fake_get_domain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, 'br0_rx': 4408, 'br0_rx_drop': 0, 'br0_rx_errors': 0, 'br0_rx_packets': 82, 'br0_tx': 0, 'br0_tx_drop': 0, 'br0_tx_errors': 0, 'br0_tx_packets': 0, } 
self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}, {'mac_address': '53:55:00:a5:39:39', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10., 'version': '1.0'} self.assertEqual(expected, actual.serialize()) @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count(self, mock_list): """Domain can fail to return the vcpu description in case it's just starting up or shutting down. Make sure None is handled gracefully. """ class DiagFakeDomain(object): def __init__(self, vcpus): self._vcpus = vcpus def vcpus(self): if self._vcpus is None: raise fakelibvirt.libvirtError("fake-error") else: return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus) def ID(self): return 1 def name(self): return "instance000001" def UUIDString(self): return "19479fee-07a5-49bb-9138-d3738280d63c" mock_list.return_value = [ DiagFakeDomain(None), DiagFakeDomain(5)] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(5, drvr._get_vcpu_used()) mock_list.assert_called_with(only_guests=True, only_running=True) @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count_none(self, mock_list): """Domain will return zero if the current number of vcpus used is None. This is in case of VM state starting up or shutting down. None type returned is counted as zero. 
""" class DiagFakeDomain(object): def __init__(self): pass def vcpus(self): return None def ID(self): return 1 def name(self): return "instance000001" mock_list.return_value = [DiagFakeDomain()] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_vcpu_used()) mock_list.assert_called_with(only_guests=True, only_running=True) def test_get_instance_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): caps = vconfig.LibvirtConfigCaps() guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.X86_64 guest.domtype = ['kvm', 'qemu'] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.I686 guest.domtype = ['kvm'] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = [(arch.X86_64, 'kvm', 'hvm'), (arch.X86_64, 'qemu', 'hvm'), (arch.I686, 'kvm', 'hvm')] got = drvr._get_instance_capabilities() self.assertEqual(want, got) def test_set_cache_mode(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'directsync') def test_set_cache_mode_invalid_mode(self): self.flags(disk_cachemodes=['file=FAKE'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertIsNone(fake_conf.driver_cache) def test_set_cache_mode_invalid_object(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuest() fake_conf.driver_cache = 'fake' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'fake') @mock.patch('os.unlink') @mock.patch.object(os.path, 'exists') def _test_shared_storage_detection(self, is_same, mock_exists, mock_unlink): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.get_host_ip_addr = mock.MagicMock(return_value='bar') mock_exists.return_value = is_same with test.nested( mock.patch.object(drvr._remotefs, 'create_file'), mock.patch.object(drvr._remotefs, 'remove_file') ) as (mock_rem_fs_create, mock_rem_fs_remove): result = drvr._is_storage_shared_with('host', '/path') mock_rem_fs_create.assert_any_call('host', mock.ANY) create_args, create_kwargs = mock_rem_fs_create.call_args self.assertTrue(create_args[1].startswith('/path')) if is_same: mock_unlink.assert_called_once_with(mock.ANY) else: mock_rem_fs_remove.assert_called_with('host', mock.ANY) remove_args, remove_kwargs = mock_rem_fs_remove.call_args self.assertTrue(remove_args[1].startswith('/path')) return result def test_shared_storage_detection_same_host(self): self.assertTrue(self._test_shared_storage_detection(True)) def test_shared_storage_detection_different_host(self): self.assertFalse(self._test_shared_storage_detection(False)) def test_shared_storage_detection_easy(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(drvr, 'get_host_ip_addr') self.mox.StubOutWithMock(utils, 'execute') self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(os, 'unlink') drvr.get_host_ip_addr().AndReturn('foo') self.mox.ReplayAll() self.assertTrue(drvr._is_storage_shared_with('foo', '/path')) 
    def test_store_pid_remove_pid(self):
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        popen = mock.Mock(pid=3)
        drvr.job_tracker.add_job(instance, popen.pid)
        self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
        drvr.job_tracker.remove_job(instance, popen.pid)
        self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_get_domain_info_with_more_return(self, mock_get_domain):
        instance = objects.Instance(**self.test_instance)
        dom_mock = mock.MagicMock()
        dom_mock.info.return_value = [
            1, 2048, 737, 8, 12345, 888888
        ]
        dom_mock.ID.return_value = mock.sentinel.instance_id
        mock_get_domain.return_value = dom_mock
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_info(instance)
        self.assertEqual(1, info.state)
        self.assertEqual(2048, info.max_mem_kb)
        self.assertEqual(737, info.mem_kb)
        self.assertEqual(8, info.num_cpu)
        self.assertEqual(12345, info.cpu_time_ns)
        self.assertEqual(mock.sentinel.instance_id, info.id)
        dom_mock.info.assert_called_once_with()
        dom_mock.ID.assert_called_once_with()
        mock_get_domain.assert_called_once_with(instance)

    def test_create_domain(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_domain = mock.MagicMock()

        guest = drvr._create_domain(domain=mock_domain)

        self.assertEqual(mock_domain, guest._domain)
        mock_domain.createWithFlags.assert_has_calls([mock.call(0)])

    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                               mock_setup_container, mock_get_info,
                               mock_clean):
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
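    # NOTE: test_create_domain_lxc above documents the LXC boot flow these
    # driver tests expect: the instance path is resolved, a 'rootfs'
    # directory is created beneath it, the root disk image is attached as a
    # container rootfs via nova.virt.disk.api.setup_container() (faked here
    # to return '/dev/nbd0'), that device name is recorded in the instance
    # system_metadata as 'rootfs_device_name', and once the guest reports
    # RUNNING the namespace is released again with clean_lxc_namespace().
    # The not-running variant further down instead tears the container back
    # down with teardown_container().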
@mock.patch('nova.virt.disk.api.clean_lxc_namespace') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps') @mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_create_domain_lxc_id_maps(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_chown, mock_get_info, mock_clean): self.flags(virt_type='lxc', uid_maps=["0:1000:100"], gid_maps=["0:1000:100"], group='libvirt') def chown_side_effect(path, id_maps): self.assertEqual('/tmp/rootfs', path) self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap) self.assertEqual(0, id_maps[0].start) self.assertEqual(1000, id_maps[0].target) self.assertEqual(100, id_maps[0].count) self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap) self.assertEqual(0, id_maps[1].start) self.assertEqual(1000, id_maps[1].target) self.assertEqual(100, id_maps[1].count) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() inst_sys_meta = dict() mock_instance.system_metadata = inst_sys_meta mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_chown.side_effect = chown_side_effect mock_get_info.return_value = hardware.InstanceInfo( state=power_state.RUNNING) with test.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter') ) as ( mock_create_images_and_backing, mock_is_booted_from_volume, mock_create_domain, mock_plug_vifs, mock_setup_basic_filtering, mock_prepare_instance_filter, mock_apply_instance_filter ): drvr._create_domain_and_network(self.context, 'xml', mock_instance, [], None) self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name']) self.assertFalse(mock_instance.called) mock_get_inst_path.assert_has_calls([mock.call(mock_instance)]) mock_is_booted_from_volume.assert_called_once_with(mock_instance, {}) mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')]) drvr.image_backend.image.assert_has_calls([mock.call(mock_instance, 'disk')]) setup_container_call = mock.call( mock_image.get_model(), container_dir='/tmp/rootfs') mock_setup_container.assert_has_calls([setup_container_call]) mock_get_info.assert_has_calls([mock.call(mock_instance)]) mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')]) @mock.patch('nova.virt.disk.api.teardown_container') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') @mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_create_domain_lxc_not_running(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_get_info, mock_teardown): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() inst_sys_meta = dict() mock_instance.system_metadata = 
inst_sys_meta mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_get_info.return_value = hardware.InstanceInfo( state=power_state.SHUTDOWN) with test.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')): drvr._create_domain_and_network(self.context, 'xml', mock_instance, [], None) self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name']) self.assertFalse(mock_instance.called) mock_get_inst_path.assert_has_calls([mock.call(mock_instance)]) mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')]) drvr.image_backend.image.assert_has_calls([mock.call(mock_instance, 'disk')]) setup_container_call = mock.call( mock_image.get_model(), container_dir='/tmp/rootfs') mock_setup_container.assert_has_calls([setup_container_call]) mock_get_info.assert_has_calls([mock.call(mock_instance)]) teardown_call = mock.call(container_dir='/tmp/rootfs') mock_teardown.assert_has_calls([teardown_call]) def test_create_domain_define_xml_fails(self): """Tests that the xml is logged when defining the domain fails.""" fake_xml = "this is a test" def fake_defineXML(xml): self.assertEqual(fake_xml, xml) raise fakelibvirt.libvirtError('virDomainDefineXML() failed') def fake_safe_decode(text, *args, **kwargs): return text + 'safe decoded' self.log_error_called = False def fake_error(msg, *args, **kwargs): self.log_error_called = True self.assertIn(fake_xml, msg % args) self.assertIn('safe decoded', msg % args) self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode) self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error) self.create_fake_libvirt_mock(defineXML=fake_defineXML) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain, fake_xml) self.assertTrue(self.log_error_called) def test_create_domain_with_flags_fails(self): """Tests that the xml is logged when creating the domain with flags fails """ fake_xml = "this is a test" fake_domain = FakeVirtDomain(fake_xml) def fake_createWithFlags(launch_flags): raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed') self.log_error_called = False def fake_error(msg, *args, **kwargs): self.log_error_called = True self.assertIn(fake_xml, msg % args) self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags) self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error) self.create_fake_libvirt_mock() self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain, domain=fake_domain) self.assertTrue(self.log_error_called) def test_create_domain_enable_hairpin_fails(self): """Tests that the xml is logged when enabling hairpin mode for the domain fails. 
""" fake_xml = "this is a test" fake_domain = FakeVirtDomain(fake_xml) def fake_execute(*args, **kwargs): raise processutils.ProcessExecutionError('error') def fake_get_interfaces(*args): return ["dev"] self.log_error_called = False def fake_error(msg, *args, **kwargs): self.log_error_called = True self.assertIn(fake_xml, msg % args) self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error) self.create_fake_libvirt_mock() self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.stubs.Set(nova.utils, 'execute', fake_execute) self.stubs.Set( nova.virt.libvirt.guest.Guest, 'get_interfaces', fake_get_interfaces) self.assertRaises(processutils.ProcessExecutionError, drvr._create_domain, domain=fake_domain, power_on=False) self.assertTrue(self.log_error_called) def test_get_vnc_console(self): instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "" "" "") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) vnc_dict = drvr.get_vnc_console(self.context, instance) self.assertEqual(vnc_dict.port, '5900') def test_get_vnc_console_unavailable(self): instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.ConsoleTypeUnavailable, drvr.get_vnc_console, self.context, instance) def test_get_spice_console(self): instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "" "" "") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) spice_dict = drvr.get_spice_console(self.context, instance) self.assertEqual(spice_dict.port, '5950') def test_get_spice_console_unavailable(self): instance = objects.Instance(**self.test_instance) dummyxml = ("instance-0000000a" "") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.ConsoleTypeUnavailable, drvr.get_spice_console, self.context, instance) def test_detach_volume_with_instance_not_found(self): # Test that detach_volume() method does not raise exception, # if the instance does not exist. 
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
            mock.patch.object(host.Host, 'get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.uuid)),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (_get_domain, _disconnect_volume):
            connection_info = {'driver_volume_type': 'fake'}
            drvr.detach_volume(connection_info, instance, '/dev/sda')
            _get_domain.assert_called_once_with(instance)
            _disconnect_volume.assert_called_once_with(connection_info, 'sda')

    def _test_attach_detach_interface_get_config(self, method_name):
        """Tests that the get_config() method is properly called in
        attach_interface() and detach_interface().

        method_name: either \"attach_interface\" or \"detach_interface\"
                     depending on the method to test.
        """
        self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance['image_ref']})

        if method_name == "attach_interface":
            self.mox.StubOutWithMock(drvr.firewall_driver,
                                     'setup_basic_filtering')
            drvr.firewall_driver.setup_basic_filtering(instance, network_info)

        expected = drvr.vif_driver.get_config(instance, network_info[0],
                                              fake_image_meta,
                                              instance.get_flavor(),
                                              CONF.libvirt.virt_type,
                                              drvr._host)
        self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
        drvr.vif_driver.get_config(instance, network_info[0],
                                   mox.IsA(objects.ImageMeta),
                                   mox.IsA(objects.Flavor),
                                   CONF.libvirt.virt_type,
                                   drvr._host).\
            AndReturn(expected)

        self.mox.ReplayAll()

        if method_name == "attach_interface":
            drvr.attach_interface(instance, fake_image_meta,
                                  network_info[0])
        elif method_name == "detach_interface":
            drvr.detach_interface(instance, network_info[0])
        else:
            raise ValueError("Unhandled method %s" % method_name)

    @mock.patch.object(lockutils, "external_lock")
    def test_attach_interface_get_config(self, mock_lock):
        """Tests that the get_config() method is properly called in
        attach_interface().
        """
        mock_lock.return_value = threading.Semaphore()

        self._test_attach_detach_interface_get_config("attach_interface")

    def test_detach_interface_get_config(self):
        """Tests that the get_config() method is properly called in
        detach_interface().
""" self._test_attach_detach_interface_get_config("detach_interface") def test_default_root_device_name(self): instance = {'uuid': 'fake_instance'} image_meta = objects.ImageMeta.from_dict({'id': 'fake'}) root_bdm = {'source_type': 'image', 'detination_type': 'volume', 'image_id': 'fake_id'} self.flags(virt_type='fake_libvirt_type', group='libvirt') self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type') self.mox.StubOutWithMock(blockinfo, 'get_root_info') blockinfo.get_disk_bus_for_device_type(instance, 'fake_libvirt_type', image_meta, 'disk').InAnyOrder().\ AndReturn('virtio') blockinfo.get_disk_bus_for_device_type(instance, 'fake_libvirt_type', image_meta, 'cdrom').InAnyOrder().\ AndReturn('ide') blockinfo.get_root_info(instance, 'fake_libvirt_type', image_meta, root_bdm, 'virtio', 'ide').AndReturn({'dev': 'vda'}) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(drvr.default_root_device_name(instance, image_meta, root_bdm), '/dev/vda') @mock.patch.object(objects.BlockDeviceMapping, "save") def test_default_device_names_for_instance(self, save_mock): instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' ephemerals = [objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'device_name': 'vdb', 'source_type': 'blank', 'volume_size': 2, 'destination_type': 'local'}))] swap = [objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'device_name': 'vdg', 'source_type': 'blank', 'volume_size': 512, 'guest_format': 'swap', 'destination_type': 'local'}))] block_device_mapping = [ objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-image-id', 'device_name': '/dev/vdxx', 'disk_bus': 'scsi'}))] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.default_device_names_for_instance(instance, instance.root_device_name, ephemerals, swap, block_device_mapping) # Ephemeral device name was correct so no changes self.assertEqual('/dev/vdb', ephemerals[0].device_name) # Swap device name was incorrect so it was changed self.assertEqual('/dev/vdc', swap[0].device_name) # Volume device name was changed too, taking the bus into account self.assertEqual('/dev/sda', block_device_mapping[0].device_name) self.assertEqual(3, save_mock.call_count) def _test_get_device_name_for_instance(self, new_bdm, expected_dev): instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' instance.ephemeral_gb = 0 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) got_dev = drvr.get_device_name_for_instance( instance, [], new_bdm) self.assertEqual(expected_dev, got_dev) def test_get_device_name_for_instance_simple(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name=None, guest_format=None, disk_bus=None, device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/vdb') def test_get_device_name_for_instance_suggested(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name='/dev/vdg', guest_format=None, disk_bus=None, device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/vdb') def test_get_device_name_for_instance_bus(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', 
destination_type='volume', boot_index=-1, volume_id='fake-id', device_name=None, guest_format=None, disk_bus='scsi', device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/sda') def test_get_device_name_for_instance_device_type(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name=None, guest_format=None, disk_bus=None, device_type='floppy') self._test_get_device_name_for_instance(new_bdm, '/dev/fda') def test_is_supported_fs_format(self): supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3, disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) for fs in supported_fs: self.assertTrue(drvr.is_supported_fs_format(fs)) supported_fs = ['', 'dummy'] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) for fs in supported_fs: self.assertFalse(drvr.is_supported_fs_format(fs)) def test_post_live_migration_at_destination_with_block_device_info(self): # Preparing mocks mock_domain = self.mox.CreateMock(fakelibvirt.virDomain) self.resultXML = None def fake_getLibVersion(): return fakelibvirt.FAKE_LIBVIRT_VERSION def fake_getCapabilities(): return """ cef19ce0-0ca2-11df-855d-b19fbce37686 x86_64 Penryn Intel """ def fake_to_xml(context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None, write_to_disk=False): if image_meta is None: image_meta = objects.ImageMeta.from_dict({}) conf = drvr._get_guest_config(instance, network_info, image_meta, disk_info, rescue, block_device_info) self.resultXML = conf.to_xml() return self.resultXML def fake_get_domain(instance): return mock_domain def fake_baselineCPU(cpu, flag): return """ Westmere Intel """ network_info = _fake_network_info(self, 1) self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion, getCapabilities=fake_getCapabilities, getVersion=lambda: 1005001, listDefinedDomains=lambda: [], numOfDomains=lambda: 0, baselineCPU=fake_baselineCPU) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 # we send an int to test sha1 call instance = objects.Instance(**instance_ref) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_to_xml) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) bdm = objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'guest_format': None, 'boot_index': 0, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vda', 'disk_bus': 'virtio', 'device_type': 'disk', 'delete_on_termination': False})) block_device_info = {'block_device_mapping': driver_block_device.convert_volumes([bdm])} block_device_info['block_device_mapping'][0]['connection_info'] = ( {'driver_volume_type': 'iscsi'}) with test.nested( mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save'), mock.patch.object(objects.Instance, 'save') ) as (mock_volume_save, mock_instance_save): drvr.post_live_migration_at_destination( self.context, instance, network_info, True, block_device_info=block_device_info) self.assertIn('fake', self.resultXML) mock_volume_save.assert_called_once_with() def test_create_propagates_exceptions(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(id=1, uuid='fake-uuid', image_ref='my_fake_image') with test.nested( mock.patch.object(drvr, '_create_domain_setup_lxc'), 
mock.patch.object(drvr, '_create_domain_cleanup_lxc'), mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr, 'firewall_driver'), mock.patch.object(drvr, '_create_domain', side_effect=exception.NovaException), mock.patch.object(drvr, 'cleanup')): self.assertRaises(exception.NovaException, drvr._create_domain_and_network, self.context, 'xml', instance, None, None) def test_create_without_pause(self): self.flags(virt_type='lxc', group='libvirt') @contextlib.contextmanager def fake_lxc_disk_handler(*args, **kwargs): yield drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) with test.nested( mock.patch.object(drvr, '_lxc_disk_handler', side_effect=fake_lxc_disk_handler), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr, 'firewall_driver'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, 'cleanup')) as ( _handler, cleanup, firewall_driver, create, plug_vifs): domain = drvr._create_domain_and_network(self.context, 'xml', instance, None, None) self.assertEqual(0, create.call_args_list[0][1]['pause']) self.assertEqual(0, domain.resume.call_count) def _test_create_with_network_events(self, neutron_failure=None, power_on=True): generated_events = [] def wait_timeout(): event = mock.MagicMock() if neutron_failure == 'timeout': raise eventlet.timeout.Timeout() elif neutron_failure == 'error': event.status = 'failed' else: event.status = 'completed' return event def fake_prepare(instance, event_name): m = mock.MagicMock() m.instance = instance m.event_name = event_name m.wait.side_effect = wait_timeout generated_events.append(m) return m virtapi = manager.ComputeVirtAPI(mock.MagicMock()) prepare = virtapi._compute.instance_events.prepare_for_instance_event prepare.side_effect = fake_prepare drvr = libvirt_driver.LibvirtDriver(virtapi, False) instance = objects.Instance(**self.test_instance) vifs = [{'id': 'vif1', 'active': False}, {'id': 'vif2', 'active': False}] @mock.patch.object(drvr, 'plug_vifs') @mock.patch.object(drvr, 'firewall_driver') @mock.patch.object(drvr, '_create_domain') @mock.patch.object(drvr, 'cleanup') def test_create(cleanup, create, fw_driver, plug_vifs): domain = drvr._create_domain_and_network(self.context, 'xml', instance, vifs, None, power_on=power_on) plug_vifs.assert_called_with(instance, vifs) pause = self._get_pause_flag(drvr, vifs, power_on=power_on) self.assertEqual(pause, create.call_args_list[0][1]['pause']) if pause: domain.resume.assert_called_once_with() if neutron_failure and CONF.vif_plugging_is_fatal: cleanup.assert_called_once_with(self.context, instance, network_info=vifs, block_device_info=None) test_create() if utils.is_neutron() and CONF.vif_plugging_timeout and power_on: prepare.assert_has_calls([ mock.call(instance, 'network-vif-plugged-vif1'), mock.call(instance, 'network-vif-plugged-vif2')]) for event in generated_events: if neutron_failure and generated_events.index(event) != 0: self.assertEqual(0, event.call_count) elif (neutron_failure == 'error' and not CONF.vif_plugging_is_fatal): event.wait.assert_called_once_with() else: self.assertEqual(0, prepare.call_count) @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron(self, is_neutron): self._test_create_with_network_events() @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_power_off(self, is_neutron): # Tests that we don't wait for events 
if we don't start the instance. self._test_create_with_network_events(power_on=False) @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_nowait(self, is_neutron): self.flags(vif_plugging_timeout=0) self._test_create_with_network_events() @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_nonfatal_timeout( self, is_neutron): self.flags(vif_plugging_is_fatal=False) self._test_create_with_network_events(neutron_failure='timeout') @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_fatal_timeout( self, is_neutron): self.assertRaises(exception.VirtualInterfaceCreateException, self._test_create_with_network_events, neutron_failure='timeout') @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_nonfatal_error( self, is_neutron): self.flags(vif_plugging_is_fatal=False) self._test_create_with_network_events(neutron_failure='error') @mock.patch('nova.utils.is_neutron', return_value=True) def test_create_with_network_events_neutron_failed_fatal_error( self, is_neutron): self.assertRaises(exception.VirtualInterfaceCreateException, self._test_create_with_network_events, neutron_failure='error') @mock.patch('nova.utils.is_neutron', return_value=False) def test_create_with_network_events_non_neutron(self, is_neutron): self._test_create_with_network_events() @mock.patch('nova.volume.encryptors.get_encryption_metadata') @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_dom = mock.MagicMock() mock_encryption_meta = mock.MagicMock() get_encryption_metadata.return_value = mock_encryption_meta fake_xml = """ instance-00000001 1048576 1 """ fake_volume_id = "fake-volume-id" connection_info = {"driver_volume_type": "fake", "data": {"access_mode": "rw", "volume_id": fake_volume_id}} def fake_getitem(*args, **kwargs): fake_bdm = {'connection_info': connection_info, 'mount_device': '/dev/vda'} return fake_bdm.get(args[0]) mock_volume = mock.MagicMock() mock_volume.__getitem__.side_effect = fake_getitem block_device_info = {'block_device_mapping': [mock_volume]} network_info = [network_model.VIF(id='1'), network_model.VIF(id='2', active=True)] with test.nested( mock.patch.object(drvr, '_get_volume_encryptor'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'), ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering, prepare_instance_filter, create_domain, apply_instance_filter): create_domain.return_value = libvirt_guest.Guest(mock_dom) guest = drvr._create_domain_and_network( self.context, fake_xml, instance, network_info, None, block_device_info=block_device_info) get_encryption_metadata.assert_called_once_with(self.context, drvr._volume_api, fake_volume_id, connection_info) get_volume_encryptor.assert_called_once_with(connection_info, mock_encryption_meta) plug_vifs.assert_called_once_with(instance, network_info) setup_basic_filtering.assert_called_once_with(instance, network_info) prepare_instance_filter.assert_called_once_with(instance, network_info) pause = 
self._get_pause_flag(drvr, network_info) create_domain.assert_called_once_with( fake_xml, pause=pause, power_on=True) self.assertEqual(mock_dom, guest._domain) def test_get_guest_storage_config(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_instance = copy.deepcopy(self.test_instance) test_instance["default_swap_device"] = None instance = objects.Instance(**test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = instance.get_flavor() conn_info = {'driver_volume_type': 'fake', 'data': {}} bdm = objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict({ 'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdc'})) bdi = {'block_device_mapping': driver_block_device.convert_volumes([bdm])} bdm = bdi['block_device_mapping'][0] bdm['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta, bdi) mock_conf = mock.MagicMock(source_path='fake') with test.nested( mock.patch.object(driver_block_device.DriverVolumeBlockDevice, 'save'), mock.patch.object(drvr, '_connect_volume'), mock.patch.object(drvr, '_get_volume_config', return_value=mock_conf), mock.patch.object(drvr, '_set_cache_mode') ) as (volume_save, connect_volume, get_volume_config, set_cache_mode): devices = drvr._get_guest_storage_config(instance, image_meta, disk_info, False, bdi, flavor, "hvm") self.assertEqual(3, len(devices)) self.assertEqual('/dev/vdb', instance.default_ephemeral_device) self.assertIsNone(instance.default_swap_device) connect_volume.assert_called_with(bdm['connection_info'], {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'}) get_volume_config.assert_called_with(bdm['connection_info'], {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'}) volume_save.assert_called_once_with() self.assertEqual(3, set_cache_mode.call_count) def test_get_neutron_events(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) network_info = [network_model.VIF(id='1'), network_model.VIF(id='2', active=True)] events = drvr._get_neutron_events(network_info) self.assertEqual([('network-vif-plugged', '1')], events) def test_unplug_vifs_ignores_errors(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) with mock.patch.object(drvr, 'vif_driver') as vif_driver: vif_driver.unplug.side_effect = exception.AgentError( method='unplug') drvr._unplug_vifs('inst', [1], ignore_errors=True) vif_driver.unplug.assert_called_once_with('inst', 1) def test_unplug_vifs_reports_errors(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) with mock.patch.object(drvr, 'vif_driver') as vif_driver: vif_driver.unplug.side_effect = exception.AgentError( method='unplug') self.assertRaises(exception.AgentError, drvr.unplug_vifs, 'inst', [1]) vif_driver.unplug.assert_called_once_with('inst', 1) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_pass_with_no_mount_device(self, undefine, unplug): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.firewall_driver = mock.Mock() drvr._disconnect_volume = mock.Mock() fake_inst = {'name': 'foo'} fake_bdms = [{'connection_info': 'foo', 'mount_device': None}] with mock.patch('nova.virt.driver' '.block_device_info_get_mapping', return_value=fake_bdms): drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False) self.assertTrue(drvr._disconnect_volume.called) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') 
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) fake_inst = {'name': 'foo'} with mock.patch.object(drvr._conn, 'lookupByName') as lookup: lookup.return_value = fake_inst # NOTE(danms): Make unplug cause us to bail early, since # we only care about how it was called unplug.side_effect = test.TestingException self.assertRaises(test.TestingException, drvr.cleanup, 'ctxt', fake_inst, 'netinfo') unplug.assert_called_once_with(fake_inst, 'netinfo', True) @mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files', return_value=True) @mock.patch.object(objects.Instance, 'save') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def test_cleanup_migrate_data_shared_block_storage(self, _undefine_domain, save, delete_instance_files, unfilter_instance): # Tests the cleanup method when migrate_data has # is_shared_block_storage=True and destroy_disks=False. instance = objects.Instance(self.context, **self.test_instance) migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=True) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.cleanup( self.context, instance, network_info={}, destroy_disks=False, migrate_data=migrate_data, destroy_vifs=False) delete_instance_files.assert_called_once_with(instance) self.assertEqual(1, int(instance.system_metadata['clean_attempts'])) self.assertTrue(instance.cleaned) save.assert_called_once_with() def test_swap_volume(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) mock_dom = mock.MagicMock() guest = libvirt_guest.Guest(mock_dom) with mock.patch.object(drvr._conn, 'defineXML', create=True) as mock_define: xmldoc = "" srcfile = "/first/path" dstfile = "/second/path" mock_dom.XMLDesc.return_value = xmldoc mock_dom.isPersistent.return_value = True mock_dom.blockJobInfo.return_value = {} drvr._swap_volume(guest, srcfile, dstfile, 1) mock_dom.XMLDesc.assert_called_once_with( flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE | fakelibvirt.VIR_DOMAIN_XML_SECURE)) mock_dom.blockRebase.assert_called_once_with( srcfile, dstfile, 0, flags=( fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)) mock_dom.blockResize.assert_called_once_with( srcfile, 1 * units.Gi / units.Ki) mock_define.assert_called_once_with(xmldoc) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume') @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') @mock.patch('nova.objects.block_device.BlockDeviceMapping.' 
'get_by_volume_and_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume') @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_swap_volume_driver_bdm_save(self, get_guest, connect_volume, get_volume_config, get_by_volume_and_instance, volume_save, swap_volume, disconnect_volume): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) old_connection_info = {'driver_volume_type': 'fake', 'serial': 'old-volume-id', 'data': {'device_path': '/fake-old-volume', 'access_mode': 'rw'}} new_connection_info = {'driver_volume_type': 'fake', 'serial': 'new-volume-id', 'data': {'device_path': '/fake-new-volume', 'access_mode': 'rw'}} mock_dom = mock.MagicMock() guest = libvirt_guest.Guest(mock_dom) mock_dom.XMLDesc.return_value = """ """ mock_dom.name.return_value = 'inst' mock_dom.UUIDString.return_value = 'uuid' get_guest.return_value = guest disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'} get_volume_config.return_value = mock.MagicMock( source_path='/fake-new-volume') bdm = objects.BlockDeviceMapping(self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'instance_uuid': 'fake-instance', 'device_name': '/dev/vdb', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-2', 'boot_index': 0})) get_by_volume_and_instance.return_value = bdm conn.swap_volume(old_connection_info, new_connection_info, instance, '/dev/vdb', 1) get_guest.assert_called_once_with(instance) connect_volume.assert_called_once_with(new_connection_info, disk_info) swap_volume.assert_called_once_with(guest, 'vdb', '/fake-new-volume', 1) disconnect_volume.assert_called_once_with(old_connection_info, 'vdb') volume_save.assert_called_once_with() def _test_live_snapshot(self, can_quiesce=False, require_quiesce=False): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) mock_dom = mock.MagicMock() test_image_meta = self.test_image_meta.copy() if require_quiesce: test_image_meta = {'properties': {'os_require_quiesce': 'yes'}} with test.nested( mock.patch.object(drvr._conn, 'defineXML', create=True), mock.patch.object(fake_libvirt_utils, 'get_disk_size'), mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'), mock.patch.object(fake_libvirt_utils, 'create_cow_image'), mock.patch.object(fake_libvirt_utils, 'chown'), mock.patch.object(fake_libvirt_utils, 'extract_snapshot'), mock.patch.object(drvr, '_set_quiesced') ) as (mock_define, mock_size, mock_backing, mock_create_cow, mock_chown, mock_snapshot, mock_quiesce): xmldoc = "" srcfile = "/first/path" dstfile = "/second/path" bckfile = "/other/path" dltfile = dstfile + ".delta" mock_dom.XMLDesc.return_value = xmldoc mock_dom.isPersistent.return_value = True mock_size.return_value = 1004009 mock_backing.return_value = bckfile guest = libvirt_guest.Guest(mock_dom) if not can_quiesce: mock_quiesce.side_effect = ( exception.InstanceQuiesceNotSupported( instance_id=self.test_instance['id'], reason='test')) image_meta = objects.ImageMeta.from_dict(test_image_meta) drvr._live_snapshot(self.context, self.test_instance, guest, srcfile, dstfile, "qcow2", "qcow2", image_meta) mock_dom.XMLDesc.assert_called_once_with(flags=( fakelibvirt.VIR_DOMAIN_XML_INACTIVE | fakelibvirt.VIR_DOMAIN_XML_SECURE)) mock_dom.blockRebase.assert_called_once_with( srcfile, dltfile, 0, flags=( fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | 
fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)) mock_size.assert_called_once_with(srcfile, format="qcow2") mock_backing.assert_called_once_with(srcfile, basename=False, format="qcow2") mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009) mock_chown.assert_called_once_with(dltfile, os.getuid()) mock_snapshot.assert_called_once_with(dltfile, "qcow2", dstfile, "qcow2") mock_define.assert_called_once_with(xmldoc) mock_quiesce.assert_any_call(mock.ANY, self.test_instance, mock.ANY, True) if can_quiesce: mock_quiesce.assert_any_call(mock.ANY, self.test_instance, mock.ANY, False) def test_live_snapshot(self): self._test_live_snapshot() def test_live_snapshot_with_quiesce(self): self._test_live_snapshot(can_quiesce=True) def test_live_snapshot_with_require_quiesce(self): self._test_live_snapshot(can_quiesce=True, require_quiesce=True) def test_live_snapshot_with_require_quiesce_fails(self): self.assertRaises(exception.InstanceQuiesceNotSupported, self._test_live_snapshot, can_quiesce=False, require_quiesce=True) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration") def test_live_migration_hostname_valid(self, mock_lm): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.live_migration(self.context, self.test_instance, "host1.example.com", lambda x: x, lambda x: x) self.assertEqual(1, mock_lm.call_count) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration") @mock.patch.object(fake_libvirt_utils, "is_valid_hostname") def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_hostname.return_value = False self.assertRaises(exception.InvalidHostname, drvr.live_migration, self.context, self.test_instance, "foo/?com=/bin/sh", lambda x: x, lambda x: x) @mock.patch.object(libvirt_driver.LibvirtDriver, "pause") def test_live_migration_force_complete(self, pause): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.live_migration_force_complete(self.test_instance) pause.assert_called_once_with(self.test_instance) @mock.patch.object(fakelibvirt.virDomain, "abortJob") def test_live_migration_abort(self, mock_abort): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), "", False) guest = libvirt_guest.Guest(dom) with mock.patch.object(nova.virt.libvirt.host.Host, 'get_guest', return_value=guest): drvr.live_migration_abort(self.test_instance) self.assertTrue(mock_abort.called) @mock.patch('os.path.exists', return_value=True) @mock.patch('tempfile.mkstemp') @mock.patch('os.close', return_value=None) def test_check_instance_shared_storage_local_raw(self, mock_close, mock_mkstemp, mock_exists): instance_uuid = str(uuid.uuid4()) self.flags(images_type='raw', group='libvirt') self.flags(instances_path='/tmp') mock_mkstemp.return_value = (-1, '/tmp/{0}/file'.format(instance_uuid)) driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) temp_file = driver.check_instance_shared_storage_local(self.context, instance) self.assertEqual('/tmp/{0}/file'.format(instance_uuid), temp_file['filename']) def test_check_instance_shared_storage_local_rbd(self): self.flags(images_type='rbd', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) self.assertIsNone(driver. 
check_instance_shared_storage_local(self.context, instance)) def test_version_to_string(self): driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) string_ver = driver._version_to_string((4, 33, 173)) self.assertEqual("4.33.173", string_ver) def test_parallels_min_version_fail(self): self.flags(virt_type='parallels', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(driver._conn, 'getLibVersion', return_value=1002011): self.assertRaises(exception.NovaException, driver.init_host, 'wibble') def test_parallels_min_version_ok(self): self.flags(virt_type='parallels', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(driver._conn, 'getLibVersion', return_value=1002012): driver.init_host('wibble') def test_get_guest_config_parallels_vm(self): self.flags(virt_type='parallels', group='libvirt') self.flags(images_type='ploop', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.HVM, cfg.os_type) self.assertIsNone(cfg.os_root) self.assertEqual(6, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[0].driver_format, "ploop") self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) def test_get_guest_config_parallels_ct(self): self.flags(virt_type='parallels', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) ct_instance = self.test_instance.copy() ct_instance["vm_mode"] = vm_mode.EXE instance_ref = objects.Instance(**ct_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {'disk': {}}}) self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertIsNone(cfg.os_root) self.assertEqual(4, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) fs = cfg.devices[0] self.assertEqual(fs.source_type, "file") self.assertEqual(fs.driver_type, "ploop") self.assertEqual(fs.target_dir, "/") self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestVideo) def _test_get_guest_config_parallels_volume(self, vmmode, devices): self.flags(virt_type='parallels', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) ct_instance = self.test_instance.copy() 
ct_instance["vm_mode"] = vmmode instance_ref = objects.Instance(**ct_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdm = objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 0, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sda'})) info = {'block_device_mapping': driver_block_device.convert_volumes( [bdm])} info['block_device_mapping'][0]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info, None, info) mock_save.assert_called_once_with() self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vmmode, cfg.os_type) self.assertIsNone(cfg.os_root) self.assertEqual(devices, len(cfg.devices)) disk_found = False for dev in cfg.devices: result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys) self.assertFalse(result) if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and (dev.source_path is None or 'disk.local' not in dev.source_path)): self.assertEqual("disk", dev.source_device) self.assertEqual("sda", dev.target_dev) disk_found = True self.assertTrue(disk_found) def test_get_guest_config_parallels_volume(self): self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4) self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6) def test_get_guest_disk_config_rbd_older_config_drive_fall_back(self): # New config drives are stored in rbd but existing instances have # config drives in the old location under the instances path. # Test that the driver falls back to 'raw' for config drive if it # doesn't exist in rbd. 
self.flags(images_type='rbd', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() mock_rbd_image = mock.Mock() mock_raw_image = mock.Mock() mock_raw_image.libvirt_info.return_value = mock.sentinel.diskconfig drvr.image_backend.image.side_effect = [mock_rbd_image, mock_raw_image] mock_rbd_image.check_image_exists.return_value = False instance = objects.Instance() disk_mapping = {'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'file'}} flavor = objects.Flavor(extra_specs={}) diskconfig = drvr._get_guest_disk_config( instance, 'disk.config', disk_mapping, flavor, drvr._get_disk_config_image_type()) self.assertEqual(2, drvr.image_backend.image.call_count) call1 = mock.call(instance, 'disk.config', 'rbd') call2 = mock.call(instance, 'disk.config', 'raw') drvr.image_backend.image.assert_has_calls([call1, call2]) self.assertEqual(mock.sentinel.diskconfig, diskconfig) def _test_prepare_domain_for_snapshot(self, live_snapshot, state): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance_ref = objects.Instance(**self.test_instance) with mock.patch.object(drvr, "suspend") as mock_suspend: drvr._prepare_domain_for_snapshot( self.context, live_snapshot, state, instance_ref) return mock_suspend.called def test_prepare_domain_for_snapshot(self): # Ensure that suspend() is only called on RUNNING or PAUSED instances for test_power_state in power_state.STATE_MAP.keys(): if test_power_state in (power_state.RUNNING, power_state.PAUSED): self.assertTrue(self._test_prepare_domain_for_snapshot( False, test_power_state)) else: self.assertFalse(self._test_prepare_domain_for_snapshot( False, test_power_state)) def test_prepare_domain_for_snapshot_lxc(self): self.flags(virt_type='lxc', group='libvirt') # Ensure that suspend() is never called with LXC for test_power_state in power_state.STATE_MAP.keys(): self.assertFalse(self._test_prepare_domain_for_snapshot( False, test_power_state)) def test_prepare_domain_for_snapshot_live_snapshots(self): # Ensure that suspend() is never called for live snapshots for test_power_state in power_state.STATE_MAP.keys(): self.assertFalse(self._test_prepare_domain_for_snapshot( True, test_power_state)) class HostStateTestCase(test.NoDBTestCase): cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686", "features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", "mtrr", "sep", "apic"], "topology": {"cores": "1", "threads": "1", "sockets": "1"}} instance_caps = [(arch.X86_64, "kvm", "hvm"), (arch.I686, "kvm", "hvm")] pci_devices = [{ "dev_id": "pci_0000_04_00_3", "address": "0000:04:10.3", "product_id": '1521', "vendor_id": '8086', "dev_type": fields.PciDeviceType.SRIOV_PF, "phys_function": None}] numa_topology = objects.NUMATopology( cells=[objects.NUMACell( id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) class FakeConnection(libvirt_driver.LibvirtDriver): """Fake connection object.""" def __init__(self): super(HostStateTestCase.FakeConnection, self).__init__(fake.FakeVirtAPI(), True) self._host = host.Host("qemu:///system") def _get_memory_mb_total(): return 497 def _get_memory_mb_used(): return 88 self._host.get_memory_mb_total = _get_memory_mb_total self._host.get_memory_mb_used = _get_memory_mb_used def _get_vcpu_total(self): return 1 def 
_get_vcpu_used(self): return 0 def _get_cpu_info(self): return HostStateTestCase.cpu_info def _get_disk_over_committed_size_total(self): return 0 def _get_local_gb_info(self): return {'total': 100, 'used': 20, 'free': 80} def get_host_uptime(self): return ('10:01:16 up 1:36, 6 users, ' 'load average: 0.21, 0.16, 0.19') def _get_disk_available_least(self): return 13091 def _get_instance_capabilities(self): return HostStateTestCase.instance_caps def _get_pci_passthrough_devices(self): return jsonutils.dumps(HostStateTestCase.pci_devices) def _get_host_numa_topology(self): return HostStateTestCase.numa_topology @mock.patch.object(fakelibvirt, "openAuth") def test_update_status(self, mock_open): mock_open.return_value = fakelibvirt.Connection("qemu:///system") drvr = HostStateTestCase.FakeConnection() stats = drvr.get_available_resource("compute1") self.assertEqual(stats["vcpus"], 1) self.assertEqual(stats["memory_mb"], 497) self.assertEqual(stats["local_gb"], 100) self.assertEqual(stats["vcpus_used"], 0) self.assertEqual(stats["memory_mb_used"], 88) self.assertEqual(stats["local_gb_used"], 20) self.assertEqual(stats["hypervisor_type"], 'QEMU') self.assertEqual(stats["hypervisor_version"], 1001000) self.assertEqual(stats["hypervisor_hostname"], 'compute1') cpu_info = jsonutils.loads(stats["cpu_info"]) self.assertEqual(cpu_info, {"vendor": "Intel", "model": "pentium", "arch": arch.I686, "features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", "mtrr", "sep", "apic"], "topology": {"cores": "1", "threads": "1", "sockets": "1"} }) self.assertEqual(stats["disk_available_least"], 80) self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]), HostStateTestCase.pci_devices) self.assertThat(objects.NUMATopology.obj_from_db_obj( stats['numa_topology'])._to_dict(), matchers.DictMatches( HostStateTestCase.numa_topology._to_dict())) class LibvirtDriverTestCase(test.NoDBTestCase): """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver.""" def setUp(self): super(LibvirtDriverTestCase, self).setUp() self.drvr = libvirt_driver.LibvirtDriver( fake.FakeVirtAPI(), read_only=True) self.context = context.get_admin_context() self.test_image_meta = { "disk_format": "raw", } def _create_instance(self, params=None): """Create a test instance.""" if not params: params = {} flavor = objects.Flavor(memory_mb=512, swap=0, vcpu_weight=None, root_gb=10, id=2, name=u'm1.tiny', ephemeral_gb=20, rxtx_factor=1.0, flavorid=u'1', vcpus=1) inst = {} inst['id'] = 1 inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b' inst['os_type'] = 'linux' inst['image_ref'] = '1' inst['reservation_id'] = 'r-fakeres' inst['user_id'] = 'fake' inst['project_id'] = 'fake' inst['instance_type_id'] = 2 inst['ami_launch_index'] = 0 inst['host'] = 'host1' inst['root_gb'] = flavor.root_gb inst['ephemeral_gb'] = flavor.ephemeral_gb inst['config_drive'] = True inst['kernel_id'] = 2 inst['ramdisk_id'] = 3 inst['key_data'] = 'ABCDEFG' inst['system_metadata'] = {} inst['metadata'] = {} inst['task_state'] = None inst.update(params) return objects.Instance(flavor=flavor, old_flavor=None, new_flavor=None, **inst) @staticmethod def _disk_info(type='qcow2', config_disk=False): # 10G root and 512M swap disk disk_info = [{'disk_size': 1, 'type': type, 'virt_disk_size': 10737418240, 'path': '/test/disk', 'backing_file': '/base/disk'}, {'disk_size': 1, 'type': type, 'virt_disk_size': 536870912, 'path': '/test/disk.swap', 'backing_file': '/base/swap_512'}] if config_disk: disk_info.append({'disk_size': 
1, 'type': 'raw', 'virt_disk_size': 1024, 'path': '/test/disk.config'}) return jsonutils.dumps(disk_info) def test_migrate_disk_and_power_off_exception(self): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .migrate_disk_and_power_off. """ self.counter = 0 self.checked_shared_storage = False def fake_get_instance_disk_info(instance, block_device_info=None): return '[]' def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' def fake_execute(*args, **kwargs): self.counter += 1 if self.counter == 1: assert False, "intentional failure" def fake_os_path_exists(path): return True def fake_is_storage_shared(dest, inst_base): self.checked_shared_storage = True return False self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) self.stubs.Set(self.drvr, '_is_storage_shared_with', fake_is_storage_shared) self.stubs.Set(utils, 'execute', fake_execute) self.stub_out('os.path.exists', fake_os_path_exists) ins_ref = self._create_instance() flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) self.assertRaises(AssertionError, self.drvr.migrate_disk_and_power_off, context.get_admin_context(), ins_ref, '10.0.0.2', flavor_obj, None) def _test_migrate_disk_and_power_off(self, flavor_obj, block_device_info=None, params_for_instance=None): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .migrate_disk_and_power_off. """ disk_info = self._disk_info() def fake_get_instance_disk_info(instance, block_device_info=None): return disk_info def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' def fake_execute(*args, **kwargs): pass def fake_copy_image(src, dest, host=None, receive=False, on_execute=None, on_completion=None, compression=True): self.assertIsNotNone(on_execute) self.assertIsNotNone(on_completion) self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image) ins_ref = self._create_instance(params=params_for_instance) # dest is different host case out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), ins_ref, '10.0.0.2', flavor_obj, None, block_device_info=block_device_info) self.assertEqual(out, disk_info) # dest is same host case out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), ins_ref, '10.0.0.1', flavor_obj, None, block_device_info=block_device_info) self.assertEqual(out, disk_info) def test_migrate_disk_and_power_off(self): flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) self._test_migrate_disk_and_power_off(flavor_obj) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') def test_migrate_disk_and_power_off_boot_from_volume(self, disconnect_volume): info = {'block_device_mapping': [{'boot_index': None, 'mount_device': '/dev/vdd', 'connection_info': None}, {'boot_index': 0, 'mount_device': '/dev/vda', 'connection_info': None}]} flavor = {'root_gb': 1, 'ephemeral_gb': 0} flavor_obj = objects.Flavor(**flavor) # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb. 
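        # Boot-from-volume case (image_ref is None): the assertion below checks
        # that the volume mapped at /dev/vda is disconnected during the resize.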
self._test_migrate_disk_and_power_off( flavor_obj, block_device_info=info, params_for_instance={'image_ref': None, 'ephemeral_gb': 0}) disconnect_volume.assert_called_with( info['block_device_mapping'][1]['connection_info'], 'vda') @mock.patch('nova.utils.execute') @mock.patch('nova.virt.libvirt.utils.copy_image') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info, get_host_ip_addr, mock_destroy, mock_copy_image, mock_execute): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .migrate_disk_and_power_off. """ self.copy_or_move_swap_called = False disk_info = self._disk_info() mock_get_disk_info.return_value = disk_info get_host_ip_addr.return_value = '10.0.0.1' def fake_copy_image(*args, **kwargs): # disk.swap should not be touched since it is skipped over if '/test/disk.swap' in list(args): self.copy_or_move_swap_called = True def fake_execute(*args, **kwargs): # disk.swap should not be touched since it is skipped over if set(['mv', '/test/disk.swap']).issubset(list(args)): self.copy_or_move_swap_called = True mock_copy_image.side_effect = fake_copy_image mock_execute.side_effect = fake_execute drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Original instance config instance = self._create_instance({'root_gb': 10, 'ephemeral_gb': 0}) # Re-size fake instance to 20G root and 1024M swap disk flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024} flavor_obj = objects.Flavor(**flavor) # Destination is same host out = drvr.migrate_disk_and_power_off(context.get_admin_context(), instance, '10.0.0.1', flavor_obj, None) mock_get_disk_info.assert_called_once_with(instance, block_device_info=None) self.assertTrue(get_host_ip_addr.called) mock_destroy.assert_called_once_with(instance) self.assertFalse(self.copy_or_move_swap_called) self.assertEqual(disk_info, out) def _test_migrate_disk_and_power_off_resize_check(self, expected_exc): """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection .migrate_disk_and_power_off. 
""" def fake_get_instance_disk_info(instance, xml=None, block_device_info=None): return self._disk_info() def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) ins_ref = self._create_instance() flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) # Migration is not implemented for LVM backed instances self.assertRaises(expected_exc, self.drvr.migrate_disk_and_power_off, None, ins_ref, '10.0.0.1', flavor_obj, None) @mock.patch('nova.utils.execute') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '._is_storage_shared_with') def _test_migrate_disk_and_power_off_backing_file(self, shared_storage, mock_is_shared_storage, mock_get_disk_info, mock_destroy, mock_execute): self.convert_file_called = False flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0} flavor_obj = objects.Flavor(**flavor) disk_info = [{'type': 'qcow2', 'path': '/test/disk', 'virt_disk_size': '10737418240', 'backing_file': '/base/disk', 'disk_size': '83886080'}] disk_info_text = jsonutils.dumps(disk_info) mock_get_disk_info.return_value = disk_info_text mock_is_shared_storage.return_value = shared_storage def fake_execute(*args, **kwargs): self.assertNotEqual(args[0:2], ['qemu-img', 'convert']) mock_execute.side_effect = fake_execute instance = self._create_instance() out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), instance, '10.0.0.2', flavor_obj, None) self.assertTrue(mock_is_shared_storage.called) mock_destroy.assert_called_once_with(instance) self.assertEqual(out, disk_info_text) def test_migrate_disk_and_power_off_shared_storage(self): self._test_migrate_disk_and_power_off_backing_file(True) def test_migrate_disk_and_power_off_non_shared_storage(self): self._test_migrate_disk_and_power_off_backing_file(False) def test_migrate_disk_and_power_off_lvm(self): self.flags(images_type='lvm', group='libvirt') def fake_execute(*args, **kwargs): pass self.stubs.Set(utils, 'execute', fake_execute) expected_exc = exception.InstanceFaultRollback self._test_migrate_disk_and_power_off_resize_check(expected_exc) def test_migrate_disk_and_power_off_resize_cannot_ssh(self): def fake_execute(*args, **kwargs): raise processutils.ProcessExecutionError() def fake_is_storage_shared(dest, inst_base): self.checked_shared_storage = True return False self.stubs.Set(self.drvr, '_is_storage_shared_with', fake_is_storage_shared) self.stubs.Set(utils, 'execute', fake_execute) expected_exc = exception.InstanceFaultRollback self._test_migrate_disk_and_power_off_resize_check(expected_exc) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info): instance = self._create_instance() flavor = {'root_gb': 5, 'ephemeral_gb': 10} flavor_obj = objects.Flavor(**flavor) mock_get_disk_info.return_value = self._disk_info() self.assertRaises( exception.InstanceFaultRollback, self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_error_default_ephemeral( self, mock_get_disk_info): # Note(Mike_D): 
The size of this instance's ephemeral_gb is 20 gb. instance = self._create_instance() flavor = {'root_gb': 10, 'ephemeral_gb': 0} flavor_obj = objects.Flavor(**flavor) mock_get_disk_info.return_value = self._disk_info() self.assertRaises(exception.InstanceFaultRollback, self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') @mock.patch('nova.virt.driver.block_device_info_get_ephemerals') def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get, mock_get_disk_info): mappings = [ { 'device_name': '/dev/sdb4', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': 'swap', 'boot_index': -1, 'volume_size': 1 }, { 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'device_type': 'disk', 'volume_id': 1, 'guest_format': None, 'boot_index': 1, 'volume_size': 6 }, { 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': 1, 'device_type': 'disk', 'guest_format': None, 'boot_index': 0, 'volume_size': 4 }, { 'device_name': '/dev/sda3', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1, 'volume_size': 3 } ] mock_get.return_value = mappings instance = self._create_instance() # Old flavor, eph is 20, real disk is 3, target is 2, fail flavor = {'root_gb': 10, 'ephemeral_gb': 2} flavor_obj = objects.Flavor(**flavor) mock_get_disk_info.return_value = self._disk_info() self.assertRaises( exception.InstanceFaultRollback, self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) # Old flavor, eph is 20, real disk is 3, target is 4 flavor = {'root_gb': 10, 'ephemeral_gb': 4} flavor_obj = objects.Flavor(**flavor) self._test_migrate_disk_and_power_off(flavor_obj) @mock.patch('nova.utils.execute') @mock.patch('nova.virt.libvirt.utils.copy_image') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.utils.get_instance_path') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '._is_storage_shared_with') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_copy_disk_info(self, mock_disk_info, mock_shared, mock_path, mock_destroy, mock_copy, mock_execuate): instance = self._create_instance() disk_info = self._disk_info() disk_info_text = jsonutils.loads(disk_info) instance_base = os.path.dirname(disk_info_text[0]['path']) flavor = {'root_gb': 10, 'ephemeral_gb': 25} flavor_obj = objects.Flavor(**flavor) mock_disk_info.return_value = disk_info mock_path.return_value = instance_base mock_shared.return_value = False src_disk_info_path = os.path.join(instance_base + '_resize', 'disk.info') with mock.patch.object(os.path, 'exists', autospec=True) \ as mock_exists: # disk.info exists on the source mock_exists.side_effect = \ lambda path: path == src_disk_info_path self.drvr.migrate_disk_and_power_off(context.get_admin_context(), instance, mock.sentinel, flavor_obj, None) self.assertTrue(mock_exists.called) dst_disk_info_path = os.path.join(instance_base, 'disk.info') mock_copy.assert_any_call(src_disk_info_path, dst_disk_info_path, host=mock.sentinel, on_execute=mock.ANY, on_completion=mock.ANY) def test_wait_for_running(self): def fake_get_info(instance): if instance['name'] == "not_found": raise exception.InstanceNotFound(instance_id=instance['uuid']) elif instance['name'] == "running": 
return hardware.InstanceInfo(state=power_state.RUNNING) else: return hardware.InstanceInfo(state=power_state.SHUTDOWN) self.stubs.Set(self.drvr, 'get_info', fake_get_info) # instance not found case self.assertRaises(exception.InstanceNotFound, self.drvr._wait_for_running, {'name': 'not_found', 'uuid': 'not_found_uuid'}) # instance is running case self.assertRaises(loopingcall.LoopingCallDone, self.drvr._wait_for_running, {'name': 'running', 'uuid': 'running_uuid'}) # else case self.drvr._wait_for_running({'name': 'else', 'uuid': 'other_uuid'}) def test_disk_size_from_instance_disk_info(self): instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30} inst = objects.Instance(**instance_data) self.assertEqual(10 * units.Gi, self.drvr._disk_size_from_instance(inst, 'disk')) self.assertEqual(20 * units.Gi, self.drvr._disk_size_from_instance(inst, 'disk.local')) self.assertEqual(0, self.drvr._disk_size_from_instance(inst, 'disk.swap')) @mock.patch('nova.utils.execute') def test_disk_raw_to_qcow2(self, mock_execute): path = '/test/disk' _path_qcow = path + '_qcow' self.drvr._disk_raw_to_qcow2(path) mock_execute.assert_has_calls([ mock.call('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', path, _path_qcow), mock.call('mv', _path_qcow, path)]) @mock.patch('nova.utils.execute') def test_disk_qcow2_to_raw(self, mock_execute): path = '/test/disk' _path_raw = path + '_raw' self.drvr._disk_qcow2_to_raw(path) mock_execute.assert_has_calls([ mock.call('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', path, _path_raw), mock.call('mv', _path_raw, path)]) @mock.patch('nova.virt.disk.api.extend') def test_disk_resize_raw(self, mock_extend): image = imgmodel.LocalFileImage("/test/disk", imgmodel.FORMAT_RAW) self.drvr._disk_resize(image, 50) mock_extend.assert_called_once_with(image, 50) @mock.patch('nova.virt.disk.api.can_resize_image') @mock.patch('nova.virt.disk.api.is_image_extendable') @mock.patch('nova.virt.disk.api.extend') def test_disk_resize_qcow2( self, mock_extend, mock_can_resize, mock_is_image_extendable): with test.nested( mock.patch.object( self.drvr, '_disk_qcow2_to_raw'), mock.patch.object( self.drvr, '_disk_raw_to_qcow2'))\ as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2): mock_can_resize.return_value = True mock_is_image_extendable.return_value = True imageqcow2 = imgmodel.LocalFileImage("/test/disk", imgmodel.FORMAT_QCOW2) imageraw = imgmodel.LocalFileImage("/test/disk", imgmodel.FORMAT_RAW) self.drvr._disk_resize(imageqcow2, 50) mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path) mock_extend.assert_called_once_with(imageraw, 50) mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path) def _test_finish_migration(self, power_on, resize_instance=False): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .finish_migration. 
""" powered_on = power_on self.fake_create_domain_called = False self.fake_disk_resize_called = False create_image_called = [False] def fake_to_xml(context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None, write_to_disk=False): return "" def fake_plug_vifs(instance, network_info): pass def fake_create_image(context, inst, disk_mapping, suffix='', disk_images=None, network_info=None, block_device_info=None, inject_files=True, fallback_from_host=None): self.assertFalse(inject_files) create_image_called[0] = True def fake_create_domain_and_network( context, xml, instance, network_info, disk_info, block_device_info=None, power_on=True, reboot=False, vifs_already_plugged=False): self.fake_create_domain_called = True self.assertEqual(powered_on, power_on) self.assertTrue(vifs_already_plugged) def fake_enable_hairpin(): pass def fake_execute(*args, **kwargs): pass def fake_get_info(instance): if powered_on: return hardware.InstanceInfo(state=power_state.RUNNING) else: return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_disk_resize(image, size): # Assert that _create_image is called before disk resize, # otherwise we might be trying to resize a disk whose backing # file hasn't been fetched, yet. self.assertTrue(create_image_called[0]) self.fake_disk_resize_called = True self.flags(use_cow_images=True) self.stubs.Set(self.drvr, '_disk_resize', fake_disk_resize) self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml) self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs) self.stubs.Set(self.drvr, '_create_image', fake_create_image) self.stubs.Set(self.drvr, '_create_domain_and_network', fake_create_domain_and_network) self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin', fake_enable_hairpin) self.stubs.Set(utils, 'execute', fake_execute) fw = base_firewall.NoopFirewallDriver() self.stubs.Set(self.drvr, 'firewall_driver', fw) self.stubs.Set(self.drvr, 'get_info', fake_get_info) ins_ref = self._create_instance() migration = objects.Migration() migration.source_compute = 'fake-source-compute' migration.dest_compute = 'fake-dest-compute' migration.source_node = 'fake-source-node' migration.dest_node = 'fake-dest-node' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # Source disks are raw to test conversion disk_info = self._disk_info(type='raw', config_disk=True) with mock.patch.object(self.drvr, '_disk_raw_to_qcow2', autospec=True) as mock_raw_to_qcow2: self.drvr.finish_migration( context.get_admin_context(), migration, ins_ref, disk_info, [], image_meta, resize_instance, None, power_on) # Assert that we converted the root and swap disks convert_calls = [mock.call('/test/disk'), mock.call('/test/disk.swap')] mock_raw_to_qcow2.assert_has_calls(convert_calls, any_order=True) # Implicitly assert that we did not convert the config disk self.assertEqual(len(convert_calls), mock_raw_to_qcow2.call_count) self.assertTrue(self.fake_create_domain_called) self.assertEqual( resize_instance, self.fake_disk_resize_called) def test_finish_migration_resize(self): self._test_finish_migration(True, resize_instance=True) def test_finish_migration_power_on(self): self._test_finish_migration(True) def test_finish_migration_power_off(self): self._test_finish_migration(False) def _test_finish_revert_migration(self, power_on): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .finish_revert_migration. 
""" powered_on = power_on self.fake_create_domain_called = False def fake_execute(*args, **kwargs): pass def fake_plug_vifs(instance, network_info): pass def fake_create_domain(context, xml, instance, network_info, disk_info, block_device_info=None, power_on=None, vifs_already_plugged=None): self.fake_create_domain_called = True self.assertEqual(powered_on, power_on) self.assertTrue(vifs_already_plugged) return mock.MagicMock() def fake_enable_hairpin(): pass def fake_get_info(instance): if powered_on: return hardware.InstanceInfo(state=power_state.RUNNING) else: return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_to_xml(context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None): return "" self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml) self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs) self.stubs.Set(utils, 'execute', fake_execute) fw = base_firewall.NoopFirewallDriver() self.stubs.Set(self.drvr, 'firewall_driver', fw) self.stubs.Set(self.drvr, '_create_domain_and_network', fake_create_domain) self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin', fake_enable_hairpin) self.stubs.Set(self.drvr, 'get_info', fake_get_info) self.stubs.Set(utils, 'get_image_from_system_metadata', lambda *a: self.test_image_meta) with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) ins_ref = self._create_instance() os.mkdir(os.path.join(tmpdir, ins_ref['name'])) libvirt_xml_path = os.path.join(tmpdir, ins_ref['name'], 'libvirt.xml') f = open(libvirt_xml_path, 'w') f.close() self.drvr.finish_revert_migration( context.get_admin_context(), ins_ref, [], None, power_on) self.assertTrue(self.fake_create_domain_called) def test_finish_revert_migration_power_on(self): self._test_finish_revert_migration(True) def test_finish_revert_migration_power_off(self): self._test_finish_revert_migration(False) def _test_finish_revert_migration_after_crash(self, backup_made=True, del_inst_failed=False): class FakeLoopingCall(object): def start(self, *a, **k): return self def wait(self): return None context = 'fake_context' instance = self._create_instance() self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path') self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(shutil, 'rmtree') self.mox.StubOutWithMock(utils, 'execute') self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None) self.stubs.Set(self.drvr, '_get_guest_xml', lambda *a, **k: None) self.stubs.Set(self.drvr, '_create_domain_and_network', lambda *a, **kw: None) self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall', lambda *a, **k: FakeLoopingCall()) libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo') os.path.exists('/fake/foo_resize').AndReturn(backup_made) if backup_made: if del_inst_failed: os_error = OSError(errno.ENOENT, 'No such file or directory') shutil.rmtree('/fake/foo').AndRaise(os_error) else: shutil.rmtree('/fake/foo') utils.execute('mv', '/fake/foo_resize', '/fake/foo') imagebackend.Backend.image(mox.IgnoreArg(), 'disk').AndReturn( fake_imagebackend.Raw()) self.mox.StubOutWithMock(fake_imagebackend.Raw, 'check_image_exists') fake_imagebackend.Raw.check_image_exists().AndReturn(True) self.mox.ReplayAll() self.drvr.finish_revert_migration(context, instance, []) def test_finish_revert_migration_after_crash(self): self._test_finish_revert_migration_after_crash(backup_made=True) def test_finish_revert_migration_after_crash_before_new(self): 
self._test_finish_revert_migration_after_crash(backup_made=True) def test_finish_revert_migration_after_crash_before_backup(self): self._test_finish_revert_migration_after_crash(backup_made=False) def test_finish_revert_migration_after_crash_delete_failed(self): self._test_finish_revert_migration_after_crash(backup_made=True, del_inst_failed=True) def test_finish_revert_migration_preserves_disk_bus(self): def fake_get_guest_xml(context, instance, network_info, disk_info, image_meta, block_device_info=None): self.assertEqual('ide', disk_info['disk_bus']) image_meta = {"disk_format": "raw", "properties": {"hw_disk_bus": "ide"}} instance = self._create_instance() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(utils, 'get_image_from_system_metadata', return_value=image_meta), mock.patch.object(drvr, '_get_guest_xml', side_effect=fake_get_guest_xml)): drvr.finish_revert_migration('', instance, None, power_on=False) def test_finish_revert_migration_snap_backend(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend ins_ref = self._create_instance() with test.nested( mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(drvr, '_get_guest_xml')) as ( mock_image, mock_cdn, mock_ggx): mock_image.return_value = {'disk_format': 'raw'} drvr.finish_revert_migration('', ins_ref, None, power_on=False) drvr.image_backend.rollback_to_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME) drvr.image_backend.remove_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) def test_finish_revert_migration_snap_backend_snapshot_not_found(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend ins_ref = self._create_instance() with test.nested( mock.patch.object(rbd_utils, 'RBDDriver'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(drvr, '_get_guest_xml')) as ( mock_rbd, mock_image, mock_cdn, mock_ggx): mock_image.return_value = {'disk_format': 'raw'} mock_rbd.rollback_to_snap.side_effect = exception.SnapshotNotFound( snapshot_id='testing') drvr.finish_revert_migration('', ins_ref, None, power_on=False) drvr.image_backend.remove_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) def test_finish_revert_migration_snap_backend_image_does_not_exist(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend drvr.image_backend.check_image_exists.return_value = False ins_ref = self._create_instance() with test.nested( mock.patch.object(rbd_utils, 'RBDDriver'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(drvr, '_get_guest_xml')) as ( mock_rbd, mock_image, mock_cdn, mock_ggx): mock_image.return_value = {'disk_format': 'raw'} drvr.finish_revert_migration('', ins_ref, None, power_on=False) self.assertFalse(drvr.image_backend.rollback_to_snap.called) self.assertFalse(drvr.image_backend.remove_snap.called) def test_cleanup_failed_migration(self): self.mox.StubOutWithMock(shutil, 
'rmtree') shutil.rmtree('/fake/inst') self.mox.ReplayAll() self.drvr._cleanup_failed_migration('/fake/inst') def test_confirm_migration(self): ins_ref = self._create_instance() self.mox.StubOutWithMock(self.drvr, "_cleanup_resize") self.drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) self.mox.ReplayAll() self.drvr.confirm_migration("migration_ref", ins_ref, _fake_network_info(self, 1)) def test_cleanup_resize_same_host(self): CONF.set_override('policy_dirs', [], group='oslo_policy') ins_ref = self._create_instance({'host': CONF.host}) def fake_os_path_exists(path): return True self.stub_out('os.path.exists', fake_os_path_exists) self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path') self.mox.StubOutWithMock(utils, 'execute') libvirt_utils.get_instance_path(ins_ref, forceold=True).AndReturn('/fake/inst') utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) imagebackend.Backend.image(ins_ref, 'disk').AndReturn( fake_imagebackend.Raw()) self.mox.StubOutWithMock(fake_imagebackend.Raw, 'check_image_exists') fake_imagebackend.Raw.check_image_exists().AndReturn(True) self.mox.ReplayAll() self.drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) def test_cleanup_resize_not_same_host(self): CONF.set_override('policy_dirs', [], group='oslo_policy') host = 'not' + CONF.host ins_ref = self._create_instance({'host': host}) def fake_os_path_exists(path): return True def fake_undefine_domain(instance): pass def fake_unplug_vifs(instance, network_info, ignore_errors=False): pass def fake_unfilter_instance(instance, network_info): pass self.stub_out('os.path.exists', fake_os_path_exists) self.stubs.Set(self.drvr, '_undefine_domain', fake_undefine_domain) self.stubs.Set(self.drvr, 'unplug_vifs', fake_unplug_vifs) self.stubs.Set(self.drvr.firewall_driver, 'unfilter_instance', fake_unfilter_instance) self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path') self.mox.StubOutWithMock(utils, 'execute') libvirt_utils.get_instance_path(ins_ref, forceold=True).AndReturn('/fake/inst') utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) imagebackend.Backend.image(ins_ref, 'disk').AndReturn( fake_imagebackend.Raw()) self.mox.StubOutWithMock(fake_imagebackend.Raw, 'check_image_exists') fake_imagebackend.Raw.check_image_exists().AndReturn(True) self.mox.ReplayAll() self.drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) def test_cleanup_resize_snap_backend(self): CONF.set_override('policy_dirs', [], group='oslo_policy') ins_ref = self._create_instance({'host': CONF.host}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend with test.nested( mock.patch.object(os.path, 'exists'), mock.patch.object(libvirt_utils, 'get_instance_path'), mock.patch.object(utils, 'execute'), mock.patch.object(drvr.image_backend, 'remove_snap')) as ( mock_exists, mock_get_path, mock_exec, mock_remove): mock_exists.return_value = True mock_get_path.return_value = '/fake/inst' drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) mock_get_path.assert_called_once_with(ins_ref, forceold=True) mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) mock_remove.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) def 
test_cleanup_resize_snap_backend_image_does_not_exist(self): CONF.set_override('policy_dirs', [], group='oslo_policy') ins_ref = self._create_instance({'host': CONF.host}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend drvr.image_backend.check_image_exists.return_value = False with test.nested( mock.patch.object(os.path, 'exists'), mock.patch.object(libvirt_utils, 'get_instance_path'), mock.patch.object(utils, 'execute'), mock.patch.object(drvr.image_backend, 'remove_snap')) as ( mock_exists, mock_get_path, mock_exec, mock_remove): mock_exists.return_value = True mock_get_path.return_value = '/fake/inst' drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) mock_get_path.assert_called_once_with(ins_ref, forceold=True) mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) self.assertFalse(mock_remove.called) def test_get_instance_disk_info_exception(self): instance = self._create_instance() class FakeExceptionDomain(FakeVirtDomain): def __init__(self): super(FakeExceptionDomain, self).__init__() def XMLDesc(self, flags): raise fakelibvirt.libvirtError("Libvirt error") def fake_get_domain(self, instance): return FakeExceptionDomain() self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.assertRaises(exception.InstanceNotFound, self.drvr.get_instance_disk_info, instance) @mock.patch('os.path.exists') @mock.patch.object(lvm, 'list_volumes') def test_lvm_disks(self, listlvs, exists): instance = objects.Instance(uuid='fake-uuid', id=1) self.flags(images_volume_group='vols', group='libvirt') exists.return_value = True listlvs.return_value = ['fake-uuid_foo', 'other-uuid_foo'] disks = self.drvr._lvm_disks(instance) self.assertEqual(['/dev/vols/fake-uuid_foo'], disks) def test_is_booted_from_volume(self): func = libvirt_driver.LibvirtDriver._is_booted_from_volume instance, disk_mapping = {}, {} self.assertTrue(func(instance, disk_mapping)) disk_mapping['disk'] = 'map' self.assertTrue(func(instance, disk_mapping)) instance['image_ref'] = 'uuid' self.assertFalse(func(instance, disk_mapping)) @mock.patch('nova.virt.netutils.get_injected_network_template') @mock.patch('nova.virt.disk.api.inject_data') @mock.patch.object(libvirt_driver.LibvirtDriver, "_conn") def _test_inject_data(self, driver_params, path, disk_params, mock_conn, disk_inject_data, inj_network, called=True): class ImageBackend(object): path = '/path' def check_image_exists(self): if self.path == '/fail/path': return False return True def get_model(self, connection): return imgmodel.LocalFileImage(self.path, imgmodel.FORMAT_RAW) def fake_inj_network(*args, **kwds): return args[0] or None inj_network.side_effect = fake_inj_network image_backend = ImageBackend() image_backend.path = path with mock.patch.object( self.drvr.image_backend, 'image', return_value=image_backend): self.flags(inject_partition=0, group='libvirt') self.drvr._inject_data(**driver_params) if called: disk_inject_data.assert_called_once_with( mock.ANY, *disk_params, partition=None, mandatory=('files',)) self.assertEqual(disk_inject_data.called, called) def _test_inject_data_default_driver_params(self, **params): return { 'instance': self._create_instance(params=params), 'network_info': None, 'admin_pass': None, 'files': None, 'suffix': '' } def test_inject_data_adminpass(self): self.flags(inject_password=True, group='libvirt') driver_params = self._test_inject_data_default_driver_params() 
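        # The disk_params list below is splatted into the mocked
        # disk.api.inject_data call (after the image argument) in the order:
        # key, net, metadata, admin_pass, files.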
driver_params['admin_pass'] = 'foobar' disk_params = [ None, # key None, # net {}, # metadata 'foobar', # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) # Test with the configuration setted to false. self.flags(inject_password=False, group='libvirt') self._test_inject_data(driver_params, "/path", disk_params, called=False) def test_inject_data_key(self): driver_params = self._test_inject_data_default_driver_params() driver_params['instance']['key_data'] = 'key-content' self.flags(inject_key=True, group='libvirt') disk_params = [ 'key-content', # key None, # net {}, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) # Test with the configuration setted to false. self.flags(inject_key=False, group='libvirt') self._test_inject_data(driver_params, "/path", disk_params, called=False) def test_inject_data_metadata(self): instance_metadata = {'metadata': {'data': 'foo'}} driver_params = self._test_inject_data_default_driver_params( **instance_metadata ) disk_params = [ None, # key None, # net {'data': 'foo'}, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) def test_inject_data_files(self): driver_params = self._test_inject_data_default_driver_params() driver_params['files'] = ['file1', 'file2'] disk_params = [ None, # key None, # net {}, # metadata None, # admin_pass ['file1', 'file2'], # files ] self._test_inject_data(driver_params, "/path", disk_params) def test_inject_data_net(self): driver_params = self._test_inject_data_default_driver_params() driver_params['network_info'] = {'net': 'eno1'} disk_params = [ None, # key {'net': 'eno1'}, # net {}, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) def test_inject_not_exist_image(self): driver_params = self._test_inject_data_default_driver_params() disk_params = [ 'key-content', # key None, # net None, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/fail/path", disk_params, called=False) def _test_attach_detach_interface(self, method, power_state, expected_flags): instance = self._create_instance() network_info = _fake_network_info(self, 1) domain = FakeVirtDomain() self.mox.StubOutWithMock(host.Host, 'get_domain') self.mox.StubOutWithMock(self.drvr.firewall_driver, 'setup_basic_filtering') self.mox.StubOutWithMock(domain, 'attachDeviceFlags') self.mox.StubOutWithMock(domain, 'info') host.Host.get_domain(instance).AndReturn(domain) if method == 'attach_interface': self.drvr.firewall_driver.setup_basic_filtering( instance, [network_info[0]]) fake_image_meta = objects.ImageMeta.from_dict( {'id': instance.image_ref}) expected = self.drvr.vif_driver.get_config( instance, network_info[0], fake_image_meta, instance.flavor, CONF.libvirt.virt_type, self.drvr._host) self.mox.StubOutWithMock(self.drvr.vif_driver, 'get_config') self.drvr.vif_driver.get_config( instance, network_info[0], mox.IsA(objects.ImageMeta), mox.IsA(objects.Flavor), CONF.libvirt.virt_type, self.drvr._host).AndReturn(expected) domain.info().AndReturn([power_state, 1, 2, 3, 4]) if method == 'attach_interface': domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags) elif method == 'detach_interface': domain.detachDeviceFlags(expected.to_xml(), expected_flags) self.mox.ReplayAll() if method == 'attach_interface': self.drvr.attach_interface( instance, fake_image_meta, network_info[0]) elif method == 'detach_interface': 
self.drvr.detach_interface( instance, network_info[0]) self.mox.VerifyAll() def test_attach_interface_with_running_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.RUNNING, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_attach_interface_with_pause_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.PAUSED, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_attach_interface_with_shutdown_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.SHUTDOWN, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)) def test_detach_interface_with_running_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.RUNNING, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_interface_with_pause_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.PAUSED, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_interface_with_shutdown_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.SHUTDOWN, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)) @mock.patch('nova.virt.libvirt.driver.LOG') def test_detach_interface_device_not_found(self, mock_log): # Asserts that we don't log an error when the interface device is not # found on the guest after a libvirt error during detach. instance = self._create_instance() vif = _fake_network_info(self, 1)[0] guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest') guest.get_power_state = mock.Mock() self.drvr._host.get_guest = mock.Mock(return_value=guest) self.drvr.vif_driver = mock.Mock() error = fakelibvirt.libvirtError( 'no matching network device was found') error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,) guest.detach_device = mock.Mock(side_effect=error) # mock out that get_interface_by_mac doesn't find the interface guest.get_interface_by_mac = mock.Mock(return_value=None) self.drvr.detach_interface(instance, vif) guest.get_interface_by_mac.assert_called_once_with(vif['address']) # an error shouldn't be logged, but a warning should be logged self.assertFalse(mock_log.error.called) self.assertEqual(1, mock_log.warning.call_count) self.assertIn('the device is no longer found on the guest', six.text_type(mock_log.warning.call_args[0])) def test_rescue(self): instance = self._create_instance({'config_drive': None}) dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") network_info = _fake_network_info(self, 1) self.mox.StubOutWithMock(self.drvr, '_get_existing_domain_xml') self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(imagebackend.Image, 'cache') self.mox.StubOutWithMock(self.drvr, '_get_guest_xml') self.mox.StubOutWithMock(self.drvr, '_destroy') self.mox.StubOutWithMock(self.drvr, '_create_domain') self.drvr._get_existing_domain_xml(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 
'disk.rescue', 'default' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()).MultipleTimes() imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), size=None, user_id=mox.IgnoreArg()) image_meta = objects.ImageMeta.from_dict( {'id': 'fake', 'name': 'fake'}) self.drvr._get_guest_xml(mox.IgnoreArg(), instance, network_info, mox.IgnoreArg(), image_meta, rescue=mox.IgnoreArg(), write_to_disk=mox.IgnoreArg() ).AndReturn(dummyxml) self.drvr._destroy(instance) self.drvr._create_domain(mox.IgnoreArg()) self.mox.ReplayAll() rescue_password = 'fake_password' self.drvr.rescue(self.context, instance, network_info, image_meta, rescue_password) self.mox.VerifyAll() @mock.patch.object(libvirt_utils, 'get_instance_path') @mock.patch.object(libvirt_utils, 'load_file') @mock.patch.object(host.Host, "get_domain") def test_unrescue(self, mock_get_domain, mock_load_file, mock_get_instance_path): dummyxml = ("instance-0000000a" "" "" "" "" "") mock_get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake=uuid', id=1) fake_dom = FakeVirtDomain(fake_xml=dummyxml) mock_get_domain.return_value = fake_dom mock_load_file.return_value = "fake_unrescue_xml" unrescue_xml_path = os.path.join('/path', 'unrescue.xml') xml_path = os.path.join('/path', 'libvirt.xml') rescue_file = os.path.join('/path', 'rescue.file') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(libvirt_utils, 'write_to_file'), mock.patch.object(drvr, '_destroy'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(libvirt_utils, 'file_delete'), mock.patch.object(drvr, '_lvm_disks', return_value=['lvm.rescue']), mock.patch.object(lvm, 'remove_volumes'), mock.patch.object(glob, 'iglob', return_value=[rescue_file]) ) as (mock_write, mock_destroy, mock_create, mock_del, mock_lvm_disks, mock_remove_volumes, mock_glob): drvr.unrescue(instance, None) mock_write.assert_called_once_with(xml_path, "fake_unrescue_xml") mock_destroy.assert_called_once_with(instance) mock_create.assert_called_once_with("fake_unrescue_xml", fake_dom) self.assertEqual(2, mock_del.call_count) self.assertEqual(unrescue_xml_path, mock_del.call_args_list[0][0][0]) self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0]) mock_remove_volumes.assert_called_once_with(['lvm.rescue']) @mock.patch( 'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata') @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive') def test_rescue_config_drive(self, mock_make, mock_add): instance = self._create_instance() uuid = instance.uuid configdrive_path = uuid + '/disk.config.rescue' dummyxml = ("instance-0000000a" "" "" "" "" "" "" "" "") network_info = _fake_network_info(self, 1) self.mox.StubOutWithMock(self.drvr, '_get_existing_domain_xml') self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(imagebackend.Image, 'cache') self.mox.StubOutWithMock(instance_metadata.InstanceMetadata, '__init__') self.mox.StubOutWithMock(self.drvr, '_get_guest_xml') self.mox.StubOutWithMock(self.drvr, '_destroy') self.mox.StubOutWithMock(self.drvr, '_create_domain') self.drvr._get_existing_domain_xml(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) 
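        # The remaining expectations mirror test_rescue(); what this test adds
        # is the config drive handling verified at the end: the rescue path is
        # expected to rebuild the config drive via
        # ConfigDriveBuilder.make_drive() at
        # <instances_path>/<instance uuid>/disk.config.rescue and to call
        # add_instance_metadata() at least once.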
libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.rescue', 'default' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()).MultipleTimes() imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), size=None, user_id=mox.IgnoreArg()) instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(), content=mox.IgnoreArg(), extra_md=mox.IgnoreArg(), network_info=mox.IgnoreArg()) image_meta = objects.ImageMeta.from_dict( {'id': 'fake', 'name': 'fake'}) self.drvr._get_guest_xml(mox.IgnoreArg(), instance, network_info, mox.IgnoreArg(), image_meta, rescue=mox.IgnoreArg(), write_to_disk=mox.IgnoreArg() ).AndReturn(dummyxml) self.drvr._destroy(instance) self.drvr._create_domain(mox.IgnoreArg()) self.mox.ReplayAll() rescue_password = 'fake_password' self.drvr.rescue(self.context, instance, network_info, image_meta, rescue_password) self.mox.VerifyAll() mock_add.assert_any_call(mock.ANY) expected_call = [mock.call(os.path.join(CONF.instances_path, configdrive_path))] mock_make.assert_has_calls(expected_call) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') shutil.assert_called_with('/path_del') self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('os.kill') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_kill_running( self, get_instance_path, kill, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) self.drvr.job_tracker.jobs[instance.uuid] = [3, 4] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') kill.assert_has_calls([mock.call(3, signal.SIGKILL), mock.call(3, 0), mock.call(4, signal.SIGKILL), mock.call(4, 0)]) shutil.assert_called_with('/path_del') self.assertTrue(result) self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_resize(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = [Exception(), None] exists.side_effect = [False, 
False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] self.assertEqual(expected, exe.mock_calls) shutil.assert_called_with('/path_del') self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_failed(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) exists.side_effect = [False, False, True, True] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') shutil.assert_called_with('/path_del') self.assertFalse(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_mv_failed(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [True, True] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertFalse(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_resume(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_none(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [False, False, False, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertEqual(0, len(shutil.mock_calls)) self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_concurrent(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = [Exception(), Exception(), None] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) 
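        # execute() is set up to fail twice before succeeding, simulating a
        # concurrent delete racing with this one, so the expected call list
        # below is the two first-pass renames ('mv' of the instance path, then
        # of its '_resize' variant) plus one retry of the instance-path rename,
        # which finally succeeds.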
expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] expected.append(expected[0]) self.assertEqual(expected, exe.mock_calls) shutil.assert_called_with('/path_del') self.assertTrue(result) def _assert_on_id_map(self, idmap, klass, start, target, count): self.assertIsInstance(idmap, klass) self.assertEqual(start, idmap.start) self.assertEqual(target, idmap.target) self.assertEqual(count, idmap.count) def test_get_id_maps(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.virt_type = "lxc" CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(len(idmaps), 4) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestUIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestUIDMap, 1, 20000, 10) self._assert_on_id_map(idmaps[2], vconfig.LibvirtConfigGuestGIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[3], vconfig.LibvirtConfigGuestGIDMap, 1, 20000, 10) def test_get_id_maps_not_lxc(self): CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(0, len(idmaps)) def test_get_id_maps_only_uid(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = [] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(2, len(idmaps)) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestUIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestUIDMap, 1, 20000, 10) def test_get_id_maps_only_gid(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.uid_maps = [] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(2, len(idmaps)) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestGIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestGIDMap, 1, 20000, 10) def test_instance_on_disk(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(uuid='fake-uuid', id=1) self.assertFalse(drvr.instance_on_disk(instance)) def test_instance_on_disk_rbd(self): self.flags(images_type='rbd', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(uuid='fake-uuid', id=1) self.assertTrue(drvr.instance_on_disk(instance)) def test_get_disk_xml(self): dom_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ diska_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ diskb_xml = """ """ dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) # NOTE(gcb): etree.tostring(node) returns an extra line with # some white spaces, need to strip it. 
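        # (Most likely because tostring() keeps the element's tail text, i.e.
        # the newline and indentation that followed the node in the original
        # document; hence the strip() calls in the comparisons below.)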
actual_diska_xml = guest.get_disk('vda').to_xml() self.assertEqual(diska_xml.strip(), actual_diska_xml.strip()) actual_diskb_xml = guest.get_disk('vdb').to_xml() self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip()) self.assertIsNone(guest.get_disk('vdc')) def test_vcpu_model_from_config(self): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) vcpu_model = drv._cpu_config_to_vcpu_model(None, None) self.assertIsNone(vcpu_model) cpu = vconfig.LibvirtConfigGuestCPU() feature1 = vconfig.LibvirtConfigGuestCPUFeature() feature2 = vconfig.LibvirtConfigGuestCPUFeature() feature1.name = 'sse' feature1.policy = cpumodel.POLICY_REQUIRE feature2.name = 'aes' feature2.policy = cpumodel.POLICY_REQUIRE cpu.features = set([feature1, feature2]) cpu.mode = cpumodel.MODE_CUSTOM cpu.sockets = 1 cpu.cores = 2 cpu.threads = 4 vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None) self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match) self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode) self.assertEqual(4, vcpu_model.topology.threads) self.assertEqual(set(['sse', 'aes']), set([f.name for f in vcpu_model.features])) cpu.mode = cpumodel.MODE_HOST_MODEL vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model) self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode) self.assertEqual(vcpu_model, vcpu_model_1) @mock.patch.object(lvm, 'get_volume_size', return_value=10) @mock.patch.object(host.Host, "get_guest") @mock.patch.object(dmcrypt, 'delete_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') @mock.patch.object(objects.Instance, 'save') def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain, mock_unfilter, mock_delete_volume, mock_get_guest, mock_get_size): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance = objects.Instance(uuid='fake-uuid', id=1, ephemeral_key_uuid='000-000-000') instance.system_metadata = {} block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} self.flags(images_type="lvm", group='libvirt') dom_xml = """ """ dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) mock_get_guest.return_value = guest drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False, block_device_info=block_device_info) mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt') @mock.patch.object(lvm, 'get_volume_size', return_value=10) @mock.patch.object(host.Host, "get_guest") @mock.patch.object(dmcrypt, 'delete_volume') def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest, mock_size, encrypted=False): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance = objects.Instance(uuid='fake-uuid', id=1, ephemeral_key_uuid='000-000-000') block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} dev_name = 'fake-dmcrypt' if encrypted else 'fake' dom_xml = """ """ % dev_name dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) mock_get_guest.return_value = guest drv._cleanup_lvm(instance, block_device_info) if encrypted: mock_delete_volume.assert_called_once_with( '/dev/mapper/fake-dmcrypt') else: self.assertFalse(mock_delete_volume.called) def test_cleanup_lvm(self): self._test_cleanup_lvm() def test_cleanup_encrypted_lvm(self): self._test_cleanup_lvm(encrypted=True) def test_vcpu_model_to_config(self): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), 
True) feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE, name='sse') feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID, name='aes') topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4) vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL, features=[feature, feature_1], topology=topo) cpu = drv._vcpu_model_to_cpu_config(vcpu_model) self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode) self.assertEqual(1, cpu.sockets) self.assertEqual(4, cpu.threads) self.assertEqual(2, len(cpu.features)) self.assertEqual(set(['sse', 'aes']), set([f.name for f in cpu.features])) self.assertEqual(set([cpumodel.POLICY_REQUIRE, cpumodel.POLICY_FORBID]), set([f.policy for f in cpu.features])) def test_trigger_crash_dump(self): mock_guest = mock.Mock(libvirt_guest.Guest, id=1) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.drvr.trigger_crash_dump(instance) def test_trigger_crash_dump_not_running(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'Requested operation is not valid: domain is not running', error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.inject_nmi = mock.Mock(side_effect=ex) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(exception.InstanceNotRunning, self.drvr.trigger_crash_dump, instance) def test_trigger_crash_dump_not_supported(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, '', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.inject_nmi = mock.Mock(side_effect=ex) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(exception.TriggerCrashDumpNotSupported, self.drvr.trigger_crash_dump, instance) def test_trigger_crash_dump_unexpected_error(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'UnexpectedError', error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.inject_nmi = mock.Mock(side_effect=ex) instance = objects.Instance(uuid='fake-uuid', id=1) with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(fakelibvirt.libvirtError, self.drvr.trigger_crash_dump, instance) class LibvirtVolumeUsageTestCase(test.NoDBTestCase): """Test for LibvirtDriver.get_all_volume_usage.""" def setUp(self): super(LibvirtVolumeUsageTestCase, self).setUp() self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.c = context.get_admin_context() self.ins_ref = objects.Instance( id=1729, uuid='875a8070-d0b9-4949-8b31-104d125c9a64' ) # verify bootable volume device path also self.bdms = [{'volume_id': 1, 'device_name': '/dev/vde'}, {'volume_id': 2, 'device_name': 'vda'}] def test_get_all_volume_usage(self): def fake_block_stats(instance_name, disk): return (169, 688640, 0, 0, -1) self.stubs.Set(self.drvr, 'block_stats', fake_block_stats) vol_usage = self.drvr.get_all_volume_usage(self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)]) expected_usage = [{'volume': 1, 'instance': self.ins_ref, 'rd_bytes': 688640, 'wr_req': 0, 'rd_req': 169, 'wr_bytes': 0}, {'volume': 2, 'instance': self.ins_ref, 'rd_bytes': 688640, 'wr_req': 0, 'rd_req': 169, 'wr_bytes': 0}] self.assertEqual(vol_usage, expected_usage) def 
test_get_all_volume_usage_device_not_found(self): def fake_get_domain(self, instance): raise exception.InstanceNotFound(instance_id="fakedom") self.stubs.Set(host.Host, 'get_domain', fake_get_domain) vol_usage = self.drvr.get_all_volume_usage(self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)]) self.assertEqual(vol_usage, []) class LibvirtNonblockingTestCase(test.NoDBTestCase): """Test libvirtd calls are nonblocking.""" def setUp(self): super(LibvirtNonblockingTestCase, self).setUp() self.flags(connection_uri="test:///default", group='libvirt') def test_connection_to_primitive(self): # Test bug 962840. import nova.virt.libvirt.driver as libvirt_driver drvr = libvirt_driver.LibvirtDriver('') drvr.set_host_enabled = mock.Mock() jsonutils.to_primitive(drvr._conn, convert_instances=True) def test_tpool_execute_calls_libvirt(self): conn = fakelibvirt.virConnect() conn.is_expected = True self.mox.StubOutWithMock(eventlet.tpool, 'execute') eventlet.tpool.execute( fakelibvirt.openAuth, 'test:///default', mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(conn) eventlet.tpool.execute( conn.domainEventRegisterAny, None, fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, mox.IgnoreArg(), mox.IgnoreArg()) if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'): eventlet.tpool.execute( conn.registerCloseCallback, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) c = driver._get_connection() self.assertTrue(c.is_expected) class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase): """Tests for libvirtDriver.volume_snapshot_create/delete.""" def setUp(self): super(LibvirtVolumeSnapshotTestCase, self).setUp() self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.c = context.get_admin_context() self.flags(instance_name_template='instance-%s') self.flags(qemu_allowed_storage_drivers=[], group='libvirt') # creating instance self.inst = {} self.inst['uuid'] = uuidutils.generate_uuid() self.inst['id'] = '1' # create domain info self.dom_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ # alternate domain info with network-backed snapshot chain self.dom_netdisk_xml = """ 0e38683e-f0af-418f-a3f1-6b67eaffffff 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ # XML with netdisk attached, and 1 snapshot taken self.dom_netdisk_xml_2 = """ 0e38683e-f0af-418f-a3f1-6b67eaffffff 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ self.create_info = {'type': 'qcow2', 'snapshot_id': '1234-5678', 'new_file': 'new-file'} self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d' self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162' self.delete_info_1 = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': None} self.delete_info_2 = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': 'other-snap.img'} self.delete_info_3 = {'type': 'qcow2', 'file_to_merge': None, 'merge_target_file': None} self.delete_info_netdisk = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': 'root.img'} self.delete_info_invalid_type = {'type': 'made_up_type', 'file_to_merge': 'some_file', 'merge_target_file': 'some_other_file'} def tearDown(self): super(LibvirtVolumeSnapshotTestCase, self).tearDown() @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.' 'refresh_connection_info') @mock.patch('nova.objects.block_device.BlockDeviceMapping.' 
'get_by_volume_and_instance') def test_volume_refresh_connection_info(self, mock_get_by_volume_and_instance, mock_refresh_connection_info): instance = objects.Instance(**self.inst) fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'connection_info': '{"fake": "connection_info"}'}) fake_bdm = objects.BlockDeviceMapping(self.c, **fake_bdm) mock_get_by_volume_and_instance.return_value = fake_bdm self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) mock_get_by_volume_and_instance.assert_called_once_with( self.c, self.volume_uuid, instance.uuid) mock_refresh_connection_info.assert_called_once_with(self.c, instance, self.drvr._volume_api, self.drvr) def test_volume_snapshot_create(self, quiesce=True): """Test snapshot creation with file-based disk.""" self.flags(instance_name_template='instance-%s') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') self.mox.StubOutWithMock(domain, 'snapshotCreateXML') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '\n' ' \n' ' \n' ' \n' ' \n' ' \n' ' \n' '\n') # Older versions of libvirt may be missing these. fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, flags=snap_flags) self.mox.ReplayAll() guest = libvirt_guest.Guest(domain) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_libgfapi(self, quiesce=True): """Test snapshot creation with libgfapi network disk.""" self.flags(instance_name_template = 'instance-%s') self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.dom_xml = """ 0e38683e-f0af-418f-a3f1-6b67ea0f919d """ instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') self.mox.StubOutWithMock(domain, 'snapshotCreateXML') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '\n' ' \n' ' \n' ' \n' ' \n' ' \n' ' \n' '\n') # Older versions of libvirt may be missing these. 
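        # The values injected below mirror libvirt's own
        # virDomainSnapshotCreateFlags bits (REUSE_EXT = 32, QUIESCE = 64), so
        # the snap_flags / snap_flags_q arithmetic stays faithful even when the
        # python-libvirt bindings in use predate these constants.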
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, flags=snap_flags) self.mox.ReplayAll() guest = libvirt_guest.Guest(domain) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_noquiesce(self): self.test_volume_snapshot_create(quiesce=False) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertIsNone(self.drvr._can_quiesce(instance, image_meta)) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce_bad_hyp(self, ver): self.flags(virt_type='xxx', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, image_meta) @mock.patch.object(host.Host, 'has_min_version', return_value=False) def test_can_quiesce_bad_ver(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = {"properties": { "hw_qemu_guest_agent": "yes"}} self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, image_meta) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce_agent_not_enable(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict({}) self.assertRaises(exception.QemuGuestAgentNotEnabled, self.drvr._can_quiesce, instance, image_meta) def test_volume_snapshot_create_outer_success(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, self.create_info['new_file']) self.drvr._volume_api.update_snapshot_status( self.c, self.create_info['snapshot_id'], 'creating') self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot') self.drvr._volume_api.get_snapshot(self.c, self.create_info['snapshot_id']).AndReturn({'status': 'available'}) self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info') self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) self.mox.ReplayAll() self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_create_outer_failure(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1) guest = libvirt_guest.Guest(domain) 
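        # Failure counterpart of the test above: when the inner
        # _volume_snapshot_create() raises, the driver is still expected to set
        # the Cinder snapshot status to 'error' and re-raise, which the
        # expectations and the assertRaises() below verify.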
self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, self.create_info['new_file']).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, self.create_info['snapshot_id'], 'error') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_create, self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_delete_1(self): """Deleting newest snapshot -- blockRebase.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vda', 'snap.img', 0, flags=0) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_relative_1(self): """Deleting newest snapshot -- blockRebase using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vda', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def _setup_block_rebase_domain_and_guest_mocks(self, dom_xml): mock_domain = mock.Mock(spec=fakelibvirt.virDomain) mock_domain.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(mock_domain) exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'virDomainBlockRebase() failed', error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID) mock_domain.blockRebase.side_effect = exc return mock_domain, guest @mock.patch.object(host.Host, "has_min_version", 
mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running(self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, we rebase the image with another image as backing file. """ mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks( self.dom_xml) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) mock_qemu_img_info.assert_called_once_with("snap.img") mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', 'snap.img', '-F', 'fake_fmt', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running_and_no_rebase_base( self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, the image is rebased onto no backing file (i.e. it will exist independently of any backing file) """ mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks(self.dom_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) self.assertEqual(0, mock_qemu_img_info.call_count) mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', '', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) def test_volume_snapshot_delete_when_dom_with_nw_disk_not_running(self): """Deleting newest snapshot of a network disk when the domain is not running should raise a NovaException. 
""" mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks( self.dom_netdisk_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): ex = self.assertRaises(exception.NovaException, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.assertIn('has not been fully tested', six.text_type(ex)) def test_volume_snapshot_delete_2(self): """Deleting older snapshot -- blockCommit.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() self.assertRaises(exception.Invalid, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_2) fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4}) def test_volume_snapshot_delete_relative_2(self): """Deleting older snapshot -- blockCommit using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn({}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_2) self.mox.VerifyAll() def test_volume_snapshot_delete_nonrelative_null_base(self): # Deleting newest and last snapshot of a volume # with blockRebase. So base of the new image will be null. 
instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) with test.nested( mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml), mock.patch.object(self.drvr._host, 'get_guest', return_value=guest), mock.patch.object(self.drvr._host, 'has_min_version', return_value=True), mock.patch.object(domain, 'blockRebase'), mock.patch.object(domain, 'blockJobInfo', return_value={'cur': 1000, 'end': 1000}) ) as (mock_xmldesc, mock_get_guest, mock_has_min_version, mock_rebase, mock_job_info): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) mock_xmldesc.assert_called_once_with(flags=0) mock_get_guest.assert_called_once_with(instance) mock_has_min_version.assert_called_once_with((1, 1, 1,)) mock_rebase.assert_called_once_with('vda', None, 0, flags=0) mock_job_info.assert_called_once_with('vda', flags=0) def test_volume_snapshot_delete_netdisk_nonrelative_null_base(self): # Deleting newest and last snapshot of a network attached volume # with blockRebase. So base of the new image will be null. instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2) guest = libvirt_guest.Guest(domain) with test.nested( mock.patch.object(domain, 'XMLDesc', return_value=self.dom_netdisk_xml_2), mock.patch.object(self.drvr._host, 'get_guest', return_value=guest), mock.patch.object(self.drvr._host, 'has_min_version', return_value=True), mock.patch.object(domain, 'blockRebase'), mock.patch.object(domain, 'blockJobInfo', return_value={'cur': 1000, 'end': 1000}) ) as (mock_xmldesc, mock_get_guest, mock_has_min_version, mock_rebase, mock_job_info): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) mock_xmldesc.assert_called_once_with(flags=0) mock_get_guest.assert_called_once_with(instance) mock_has_min_version.assert_called_once_with((1, 1, 1,)) mock_rebase.assert_called_once_with('vdb', None, 0, flags=0) mock_job_info.assert_called_once_with('vdb', flags=0) def test_volume_snapshot_delete_outer_success(self): instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete') self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, delete_info=self.delete_info_1) self.drvr._volume_api.update_snapshot_status( self.c, snapshot_id, 'deleting') self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info') self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) self.mox.ReplayAll() self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_outer_failure(self): instance = objects.Instance(**self.inst) snapshot_id = '1234-9876' FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete') self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, delete_info=self.delete_info_1).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, snapshot_id, 'error_deleting') self.mox.ReplayAll() 
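        # Unlike the success path (status 'deleting' then a connection-info
        # refresh), a failing inner delete must leave the Cinder snapshot in
        # 'error_deleting' (set up above) and propagate the original error,
        # asserted below.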
self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_invalid_type(self): instance = objects.Instance(**self.inst) FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.drvr._volume_api.update_snapshot_status( self.c, self.snapshot_id, 'error_deleting') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_delete, self.c, instance, self.volume_uuid, self.snapshot_id, self.delete_info_invalid_type) def test_volume_snapshot_delete_netdisk_1(self): """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vdb', 'vdb[1]', 0, flags=0) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_netdisk_relative_1(self): """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vdb', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() 
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_netdisk_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() self.assertRaises(exception.Invalid, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_netdisk) fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4}) def test_volume_snapshot_delete_netdisk_relative_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_netdisk) self.mox.VerifyAll() def _fake_convert_image(source, dest, in_format, out_format, run_as_root=True): libvirt_driver.libvirt_utils.files[dest] = '' class _BaseSnapshotTests(test.NoDBTestCase): def setUp(self): super(_BaseSnapshotTests, self).setUp() self.flags(snapshots_directory='./', group='libvirt') self.context = context.get_admin_context() self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) self.image_service = nova.tests.unit.image.fake.stub_out_image_service( self) self.mock_update_task_state = mock.Mock() test_instance = _create_test_instance() self.instance_ref = 
objects.Instance(**test_instance) self.instance_ref.info_cache = objects.InstanceInfoCache( network_info=None) def _assert_snapshot(self, snapshot, disk_format, expected_properties=None): self.mock_update_task_state.assert_has_calls([ mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD), mock.call(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD)]) props = snapshot['properties'] self.assertEqual(props['image_state'], 'available') self.assertEqual(snapshot['status'], 'active') self.assertEqual(snapshot['disk_format'], disk_format) self.assertEqual(snapshot['name'], 'test-snap') if expected_properties: for expected_key, expected_value in \ six.iteritems(expected_properties): self.assertEqual(expected_value, props[expected_key]) def _create_image(self, extra_properties=None): properties = {'instance_id': self.instance_ref['id'], 'user_id': str(self.context.user_id)} if extra_properties: properties.update(extra_properties) sent_meta = {'name': 'test-snap', 'is_public': False, 'status': 'creating', 'properties': properties} # Create new image. It will be updated in snapshot method # To work with it from snapshot, the single image_service is needed recv_meta = self.image_service.create(self.context, sent_meta) return recv_meta @mock.patch.object(imagebackend.Image, 'resolve_driver_format') @mock.patch.object(host.Host, 'get_domain') def _snapshot(self, image_id, mock_get_domain, mock_resolve): mock_get_domain.return_value = FakeVirtDomain() driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) driver.snapshot(self.context, self.instance_ref, image_id, self.mock_update_task_state) snapshot = self.image_service.show(self.context, image_id) return snapshot def _test_snapshot(self, disk_format, extra_properties=None): recv_meta = self._create_image(extra_properties=extra_properties) snapshot = self._snapshot(recv_meta['id']) self._assert_snapshot(snapshot, disk_format=disk_format, expected_properties=extra_properties) class LibvirtSnapshotTests(_BaseSnapshotTests): def test_ami(self): # Assign different image_ref from nova/images/fakes for testing ami self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' self.instance_ref.system_metadata = \ utils.get_system_metadata_from_image( {'disk_format': 'ami'}) self._test_snapshot(disk_format='ami') @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw') @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) def test_raw(self, mock_convert_image): self._test_snapshot(disk_format='raw') def test_qcow2(self): self._test_snapshot(disk_format='qcow2') @mock.patch.object(fake_libvirt_utils, 'disk_type', new='ploop') @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) def test_ploop(self, mock_convert_image): self._test_snapshot(disk_format='ploop') def test_no_image_architecture(self): self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' self._test_snapshot(disk_format='qcow2') def test_no_original_image(self): self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa' self._test_snapshot(disk_format='qcow2') def test_snapshot_metadata_image(self): # Assign an image with an architecture defined (x86_64) self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379' extra_properties = {'architecture': 'fake_arch', 'key_a': 'value_a', 'key_b': 'value_b', 'os_type': 'linux'} self._test_snapshot(disk_format='qcow2', extra_properties=extra_properties) 
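    # The rbd-backed tests below exercise the direct snapshot path used when
    # images_type is 'rbd': the driver clones the instance disk's RBD snapshot
    # into the destination pool and flattens the clone, falls back to the
    # generic cold-snapshot path when the parent info cannot be read, and
    # removes the temporary snap again when a later step fails. A rough sketch
    # of the happy-path calls the mocks assert, using hypothetical image names:
    #
    #   rbd.clone(location, 'snapshot-image', dest_pool='test-pool')
    #   rbd.flatten('snapshot-image', pool='test-pool')
    #   rbd.remove_snap(...)   # only as cleanup after a failed attempt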
@mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) with mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')): with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'): self._test_snapshot(disk_format='raw') rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool') rbd.flatten.assert_called_with(mock.ANY, pool='test-pool') @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable( image_id='fake_id', reason='rbd testing')) with test.nested( mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image), mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')): self._test_snapshot(disk_format='raw') self.assertFalse(rbd.clone.called) @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) rbd.clone = mock.Mock(side_effect=exception.Forbidden( image_id='fake_id', reason='rbd testing')) with test.nested( mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image), mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')): self._test_snapshot(disk_format='raw') # Ensure that the direct_snapshot attempt was cleaned up rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False, pool='b', force=True) @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) with test.nested( mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'), mock.patch.object(self.image_service, 'update', side_effect=test.TestingException)): self.assertRaises(test.TestingException, self._test_snapshot, disk_format='raw') rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool') rbd.flatten.assert_called_with(mock.ANY, pool='test-pool') # Ensure that the direct_snapshot attempt was cleaned up rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True, pool='b', force=True) @mock.patch.object(imagebackend.Image, 'direct_snapshot') @mock.patch.object(imagebackend.Image, 'resolve_driver_format') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(host.Host, 'get_guest') def 
test_raw_with_rbd_clone_is_live_snapshot(self, mock_get_guest, mock_version, mock_resolve, mock_snapshot): self.flags(disable_libvirt_livesnapshot=False, group='workarounds') self.flags(images_type='rbd', group='libvirt') mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_guest._domain = mock.Mock() mock_get_guest.return_value = mock_guest driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) recv_meta = self._create_image() with mock.patch.object(driver, "suspend") as mock_suspend: driver.snapshot(self.context, self.instance_ref, recv_meta['id'], self.mock_update_task_state) self.assertFalse(mock_suspend.called) @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) @mock.patch.object(fake_libvirt_utils, 'find_disk') @mock.patch.object(imagebackend.Image, 'resolve_driver_format') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(host.Host, 'get_guest') @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_failure_does_cold_snapshot(self, mock_rbd, mock_driver, mock_get_guest, mock_version, mock_resolve, mock_find_disk, mock_convert): self.flags(disable_libvirt_livesnapshot=False, group='workarounds') self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable( image_id='fake_id', reason='rbd testing')) mock_find_disk.return_value = ('rbd://some/fake/rbd/image', 'raw') mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_guest.get_power_state.return_value = power_state.RUNNING mock_guest._domain = mock.Mock() mock_get_guest.return_value = mock_guest driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) recv_meta = self._create_image() with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'): with mock.patch.object(driver, "suspend") as mock_suspend: driver.snapshot(self.context, self.instance_ref, recv_meta['id'], self.mock_update_task_state) self.assertTrue(mock_suspend.called) class LXCSnapshotTests(LibvirtSnapshotTests): """Repeat all of the Libvirt snapshot tests, but with LXC enabled""" def setUp(self): super(LXCSnapshotTests, self).setUp() self.flags(virt_type='lxc', group='libvirt') def test_raw_with_rbd_clone_failure_does_cold_snapshot(self): self.skipTest("managedSave is not supported with LXC") class LVMSnapshotTests(_BaseSnapshotTests): @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm') @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info') def _test_lvm_snapshot(self, disk_format, mock_volume_info, mock_convert_image): self.flags(images_type='lvm', images_volume_group='nova-vg', group='libvirt') self._test_snapshot(disk_format=disk_format) mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')]) mock_convert_image.assert_called_once_with( '/dev/nova-vg/lv', mock.ANY, 'raw', disk_format, run_as_root=True) def test_raw(self): self._test_lvm_snapshot('raw') def test_qcow2(self): self.flags(snapshot_image_format='qcow2', group='libvirt') self._test_lvm_snapshot('qcow2') nova-13.1.4/nova/tests/unit/virt/libvirt/test_host.py0000664000567000056710000012714713064447152024037 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not 
use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import eventlet from eventlet import greenthread import mock import six from nova.compute import arch from nova import exception from nova import objects from nova import test from nova.tests.unit.virt.libvirt import fakelibvirt from nova.virt import event from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host host.libvirt = fakelibvirt libvirt_guest.libvirt = fakelibvirt class FakeVirtDomain(object): def __init__(self, id=-1, name=None): self._id = id self._name = name self._uuid = str(uuid.uuid4()) def name(self): return self._name def ID(self): return self._id def UUIDString(self): return self._uuid class HostTestCase(test.NoDBTestCase): def setUp(self): super(HostTestCase, self).setUp() self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.host = host.Host("qemu:///system") @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback") def test_close_callback(self, mock_close): self.close_callback = None def set_close_callback(cb, opaque): self.close_callback = cb mock_close.side_effect = set_close_callback # verify that the driver registers for the close callback self.host.get_connection() self.assertTrue(self.close_callback) @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback") def test_close_callback_bad_signature(self, mock_close): '''Validates that a connection to libvirt exist, even when registerCloseCallback method has a different number of arguments in the libvirt python library. ''' mock_close.side_effect = TypeError('dd') connection = self.host.get_connection() self.assertTrue(connection) @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback") def test_close_callback_not_defined(self, mock_close): '''Validates that a connection to libvirt exist, even when registerCloseCallback method missing from the libvirt python library. 
''' mock_close.side_effect = AttributeError('dd') connection = self.host.get_connection() self.assertTrue(connection) @mock.patch.object(fakelibvirt.virConnect, "getLibVersion") def test_broken_connection(self, mock_ver): for (error, domain) in ( (fakelibvirt.VIR_ERR_SYSTEM_ERROR, fakelibvirt.VIR_FROM_REMOTE), (fakelibvirt.VIR_ERR_SYSTEM_ERROR, fakelibvirt.VIR_FROM_RPC), (fakelibvirt.VIR_ERR_INTERNAL_ERROR, fakelibvirt.VIR_FROM_RPC)): conn = self.host._connect("qemu:///system", False) mock_ver.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "Connection broken", error_code=error, error_domain=domain) self.assertFalse(self.host._test_connection(conn)) @mock.patch.object(host, 'LOG') def test_connect_auth_cb_exception(self, log_mock): creds = dict(authname='nova', password='verybadpass') self.assertRaises(exception.NovaException, self.host._connect_auth_cb, creds, False) self.assertEqual(0, len(log_mock.method_calls), 'LOG should not be used in _connect_auth_cb.') @mock.patch.object(greenthread, 'spawn_after') def test_event_dispatch(self, mock_spawn_after): # Validate that the libvirt self-pipe for forwarding # events between threads is working sanely def handler(event): got_events.append(event) hostimpl = host.Host("qemu:///system", lifecycle_event_handler=handler) got_events = [] hostimpl._init_events_pipe() event1 = event.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", event.EVENT_LIFECYCLE_STARTED) event2 = event.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", event.EVENT_LIFECYCLE_PAUSED) hostimpl._queue_event(event1) hostimpl._queue_event(event2) hostimpl._dispatch_events() want_events = [event1, event2] self.assertEqual(want_events, got_events) event3 = event.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", event.EVENT_LIFECYCLE_RESUMED) event4 = event.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", event.EVENT_LIFECYCLE_STOPPED) hostimpl._queue_event(event3) hostimpl._queue_event(event4) hostimpl._dispatch_events() want_events = [event1, event2, event3] self.assertEqual(want_events, got_events) # STOPPED is delayed so it's handled separately mock_spawn_after.assert_called_once_with( hostimpl._lifecycle_delay, hostimpl._event_emit, event4) def test_event_lifecycle(self): got_events = [] # Validate that libvirt events are correctly translated # to Nova events def spawn_after(seconds, func, *args, **kwargs): got_events.append(args[0]) return mock.Mock(spec=greenthread.GreenThread) greenthread.spawn_after = mock.Mock(side_effect=spawn_after) hostimpl = host.Host("qemu:///system", lifecycle_event_handler=lambda e: None) conn = hostimpl.get_connection() hostimpl._init_events_pipe() fake_dom_xml = """ cef19ce0-0ca2-11df-855d-b19fbce37686 """ dom = fakelibvirt.Domain(conn, fake_dom_xml, False) hostimpl._event_lifecycle_callback( conn, dom, fakelibvirt.VIR_DOMAIN_EVENT_STOPPED, 0, hostimpl) hostimpl._dispatch_events() self.assertEqual(len(got_events), 1) self.assertIsInstance(got_events[0], event.LifecycleEvent) self.assertEqual(got_events[0].uuid, "cef19ce0-0ca2-11df-855d-b19fbce37686") self.assertEqual(got_events[0].transition, event.EVENT_LIFECYCLE_STOPPED) def test_event_emit_delayed_call_delayed(self): ev = event.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", event.EVENT_LIFECYCLE_STOPPED) for uri in ("qemu:///system", "xen:///"): spawn_after_mock = mock.Mock() greenthread.spawn_after = spawn_after_mock hostimpl = host.Host(uri, lifecycle_event_handler=lambda e: None) hostimpl._event_emit_delayed(ev) 
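# A minimal standalone sketch (a simplification, not Host's actual
# implementation) of the delay-and-cancel pattern the surrounding
# _event_emit_delayed tests exercise: a STOPPED lifecycle event is emitted
# via greenthread.spawn_after so that a quickly following event for the same
# instance can cancel the pending emit instead of reporting a spurious stop.
# The helper name and dict below are illustrative only.
from eventlet import greenthread

_events_delayed = {}


def event_emit_delayed(uuid, is_stopped, emit, delay=15):
    pending = _events_delayed.pop(uuid, None)
    if pending is not None:
        # a newer event for this instance supersedes the pending STOPPED emit
        pending.cancel()
    if is_stopped:
        _events_delayed[uuid] = greenthread.spawn_after(delay, emit, uuid)
    else:
        emit(uuid)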
spawn_after_mock.assert_called_once_with( 15, hostimpl._event_emit, ev) @mock.patch.object(greenthread, 'spawn_after') def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock): hostimpl = host.Host("xen:///", lifecycle_event_handler=lambda e: None) uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686" gt_mock = mock.Mock() hostimpl._events_delayed[uuid] = gt_mock ev = event.LifecycleEvent( uuid, event.EVENT_LIFECYCLE_STOPPED) hostimpl._event_emit_delayed(ev) gt_mock.cancel.assert_called_once_with() self.assertTrue(spawn_after_mock.called) def test_event_delayed_cleanup(self): hostimpl = host.Host("xen:///", lifecycle_event_handler=lambda e: None) uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686" ev = event.LifecycleEvent( uuid, event.EVENT_LIFECYCLE_STARTED) gt_mock = mock.Mock() hostimpl._events_delayed[uuid] = gt_mock hostimpl._event_emit_delayed(ev) gt_mock.cancel.assert_called_once_with() self.assertNotIn(uuid, hostimpl._events_delayed.keys()) @mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny") @mock.patch.object(host.Host, "_connect") def test_get_connection_serial(self, mock_conn, mock_event): def get_conn_currency(host): host.get_connection().getLibVersion() def connect_with_block(*a, **k): # enough to allow another connect to run eventlet.sleep(0) self.connect_calls += 1 return fakelibvirt.openAuth("qemu:///system", [[], lambda: 1, None], 0) def fake_register(*a, **k): self.register_calls += 1 self.connect_calls = 0 self.register_calls = 0 mock_conn.side_effect = connect_with_block mock_event.side_effect = fake_register # call serially get_conn_currency(self.host) get_conn_currency(self.host) self.assertEqual(self.connect_calls, 1) self.assertEqual(self.register_calls, 1) @mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny") @mock.patch.object(host.Host, "_connect") def test_get_connection_concurrency(self, mock_conn, mock_event): def get_conn_currency(host): host.get_connection().getLibVersion() def connect_with_block(*a, **k): # enough to allow another connect to run eventlet.sleep(0) self.connect_calls += 1 return fakelibvirt.openAuth("qemu:///system", [[], lambda: 1, None], 0) def fake_register(*a, **k): self.register_calls += 1 self.connect_calls = 0 self.register_calls = 0 mock_conn.side_effect = connect_with_block mock_event.side_effect = fake_register # call concurrently thr1 = eventlet.spawn(get_conn_currency, self.host) thr2 = eventlet.spawn(get_conn_currency, self.host) # let threads run eventlet.sleep(0) thr1.wait() thr2.wait() self.assertEqual(self.connect_calls, 1) self.assertEqual(self.register_calls, 1) @mock.patch.object(fakelibvirt.virConnect, "getLibVersion") @mock.patch.object(fakelibvirt.virConnect, "getVersion") @mock.patch.object(fakelibvirt.virConnect, "getType") def test_has_min_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver): fake_lv_ver.return_value = 1002003 fake_hv_ver.return_value = 4005006 fake_hv_type.return_value = 'xyz' lv_ver = (1, 2, 3) hv_ver = (4, 5, 6) hv_type = 'xyz' self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, hv_type)) self.assertFalse(self.host.has_min_version(lv_ver, hv_ver, 'abc')) self.assertFalse(self.host.has_min_version(lv_ver, (4, 5, 7), hv_type)) self.assertFalse(self.host.has_min_version((1, 3, 3), hv_ver, hv_type)) self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, None)) self.assertTrue(self.host.has_min_version(lv_ver, None, hv_type)) self.assertTrue(self.host.has_min_version(None, hv_ver, hv_type)) @mock.patch.object(fakelibvirt.virConnect, "getLibVersion") 
@mock.patch.object(fakelibvirt.virConnect, "getVersion") @mock.patch.object(fakelibvirt.virConnect, "getType") def test_has_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver): fake_lv_ver.return_value = 1002003 fake_hv_ver.return_value = 4005006 fake_hv_type.return_value = 'xyz' lv_ver = (1, 2, 3) hv_ver = (4, 5, 6) hv_type = 'xyz' self.assertTrue(self.host.has_version(lv_ver, hv_ver, hv_type)) for lv_ver_ in [(1, 2, 2), (1, 2, 4)]: self.assertFalse(self.host.has_version(lv_ver_, hv_ver, hv_type)) for hv_ver_ in [(4, 4, 6), (4, 6, 6)]: self.assertFalse(self.host.has_version(lv_ver, hv_ver_, hv_type)) self.assertFalse(self.host.has_version(lv_ver, hv_ver, 'abc')) self.assertTrue(self.host.has_version(lv_ver, hv_ver, None)) self.assertTrue(self.host.has_version(lv_ver, None, hv_type)) self.assertTrue(self.host.has_version(None, hv_ver, hv_type)) @mock.patch.object(fakelibvirt.virConnect, "lookupByID") def test_get_domain_by_id(self, fake_lookup): dom = fakelibvirt.virDomain(self.host.get_connection(), "") fake_lookup.return_value = dom self.assertEqual(dom, self.host._get_domain_by_id(7)) fake_lookup.assert_called_once_with(7) @mock.patch.object(fakelibvirt.virConnect, "lookupByID") def test_get_domain_by_id_raises(self, fake_lookup): fake_lookup.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'Domain not found: no domain with matching id 7', error_code=fakelibvirt.VIR_ERR_NO_DOMAIN, error_domain=fakelibvirt.VIR_FROM_QEMU) self.assertRaises(exception.InstanceNotFound, self.host._get_domain_by_id, 7) fake_lookup.assert_called_once_with(7) @mock.patch.object(fakelibvirt.virConnect, "lookupByName") def test_get_domain_by_name(self, fake_lookup): dom = fakelibvirt.virDomain(self.host.get_connection(), "") fake_lookup.return_value = dom self.assertEqual(dom, self.host._get_domain_by_name("wibble")) fake_lookup.assert_called_once_with("wibble") @mock.patch.object(fakelibvirt.virConnect, "lookupByName") def test_get_domain_by_name_raises(self, fake_lookup): fake_lookup.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'Domain not found: no domain with matching name', error_code=fakelibvirt.VIR_ERR_NO_DOMAIN, error_domain=fakelibvirt.VIR_FROM_QEMU) self.assertRaises(exception.InstanceNotFound, self.host._get_domain_by_name, "wibble") fake_lookup.assert_called_once_with("wibble") @mock.patch.object(host.Host, "_get_domain_by_name") def test_get_domain(self, fake_get_domain): dom = fakelibvirt.virDomain(self.host.get_connection(), "") fake_get_domain.return_value = dom instance = objects.Instance(id="124") self.assertEqual(dom, self.host.get_domain(instance)) fake_get_domain.assert_called_once_with("instance-0000007c") @mock.patch.object(host.Host, "_get_domain_by_name") def test_get_guest(self, fake_get_domain): dom = fakelibvirt.virDomain(self.host.get_connection(), "") fake_get_domain.return_value = dom instance = objects.Instance(id="124") guest = self.host.get_guest(instance) self.assertEqual(dom, guest._domain) self.assertIsInstance(guest, libvirt_guest.Guest) fake_get_domain.assert_called_once_with("instance-0000007c") @mock.patch.object(fakelibvirt.Connection, "listAllDomains") def test_list_instance_domains_fast(self, mock_list_all): vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vm3 = FakeVirtDomain(name="instance00000003") vm4 = FakeVirtDomain(name="instance00000004") def fake_list_all(flags): vms = [] if flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE: vms.extend([vm1, vm2]) if 
flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE: vms.extend([vm3, vm4]) return vms mock_list_all.side_effect = fake_list_all doms = self.host._list_instance_domains_fast() mock_list_all.assert_called_once_with( fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE) mock_list_all.reset_mock() self.assertEqual(len(doms), 2) self.assertEqual(doms[0].name(), vm1.name()) self.assertEqual(doms[1].name(), vm2.name()) doms = self.host._list_instance_domains_fast(only_running=False) mock_list_all.assert_called_once_with( fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE | fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE) self.assertEqual(len(doms), 4) self.assertEqual(doms[0].name(), vm1.name()) self.assertEqual(doms[1].name(), vm2.name()) self.assertEqual(doms[2].name(), vm3.name()) self.assertEqual(doms[3].name(), vm4.name()) @mock.patch.object(fakelibvirt.Connection, "numOfDomains") @mock.patch.object(fakelibvirt.Connection, "listDefinedDomains") @mock.patch.object(fakelibvirt.Connection, "listDomainsID") @mock.patch.object(host.Host, "_get_domain_by_name") @mock.patch.object(host.Host, "_get_domain_by_id") def test_list_instance_domains_slow(self, mock_get_id, mock_get_name, mock_list_ids, mock_list_names, mock_num_ids): vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vm3 = FakeVirtDomain(name="instance00000003") vm4 = FakeVirtDomain(name="instance00000004") vms = [vm1, vm2, vm3, vm4] def fake_get_domain_by_id(id): for vm in vms: if vm.ID() == id: return vm raise exception.InstanceNotFound(instance_id=id) def fake_get_domain_by_name(name): for vm in vms: if vm.name() == name: return vm raise exception.InstanceNotFound(instance_id=name) def fake_list_ids(): # Include one ID that no longer exists return [vm1.ID(), vm2.ID(), 666] def fake_list_names(): # Include one name that no longer exists and # one dup from running list to show race in # transition from inactive -> running return [vm1.name(), vm3.name(), vm4.name(), "fishfood"] mock_get_id.side_effect = fake_get_domain_by_id mock_get_name.side_effect = fake_get_domain_by_name mock_list_ids.side_effect = fake_list_ids mock_list_names.side_effect = fake_list_names mock_num_ids.return_value = 2 doms = self.host._list_instance_domains_slow() mock_list_ids.assert_called_once_with() mock_num_ids.assert_called_once_with() self.assertFalse(mock_list_names.called) mock_list_ids.reset_mock() mock_list_names.reset_mock() mock_num_ids.reset_mock() self.assertEqual(len(doms), 2) self.assertEqual(doms[0].name(), vm1.name()) self.assertEqual(doms[1].name(), vm2.name()) doms = self.host._list_instance_domains_slow(only_running=False) mock_list_ids.assert_called_once_with() mock_num_ids.assert_called_once_with() mock_list_names.assert_called_once_with() self.assertEqual(len(doms), 4) self.assertEqual(doms[0].name(), vm1.name()) self.assertEqual(doms[1].name(), vm2.name()) self.assertEqual(doms[2].name(), vm3.name()) self.assertEqual(doms[3].name(), vm4.name()) @mock.patch.object(fakelibvirt.Connection, "listAllDomains") @mock.patch.object(fakelibvirt.Connection, "numOfDomains") @mock.patch.object(fakelibvirt.Connection, "listDomainsID") @mock.patch.object(host.Host, "_get_domain_by_id") def test_list_instance_domains_fallback(self, mock_get_id, mock_list_ids, mock_num_ids, mock_list_all): vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vms = [vm1, vm2] def fake_get_domain_by_id(id): for vm in vms: if vm.ID() == id: return vm raise 
exception.InstanceNotFound(instance_id=id) def fake_list_doms(): return [vm1.ID(), vm2.ID()] def fake_list_all(flags): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "API is not supported", error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) raise ex mock_get_id.side_effect = fake_get_domain_by_id mock_list_ids.side_effect = fake_list_doms mock_num_ids.return_value = 2 mock_list_all.side_effect = fake_list_all doms = self.host.list_instance_domains() mock_list_all.assert_called_once_with( fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE) mock_list_ids.assert_called_once_with() mock_num_ids.assert_called_once_with() self.assertEqual(len(doms), 2) self.assertEqual(doms[0].ID(), vm1.ID()) self.assertEqual(doms[1].ID(), vm2.ID()) @mock.patch.object(host.Host, "_list_instance_domains_fast") def test_list_instance_domains_filtering(self, mock_list): vm0 = FakeVirtDomain(id=0, name="Domain-0") # Xen dom-0 vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vm3 = FakeVirtDomain(name="instance00000003") vm4 = FakeVirtDomain(name="instance00000004") mock_list.return_value = [vm0, vm1, vm2] doms = self.host.list_instance_domains() self.assertEqual(len(doms), 2) self.assertEqual(doms[0].name(), vm1.name()) self.assertEqual(doms[1].name(), vm2.name()) mock_list.assert_called_with(True) mock_list.return_value = [vm0, vm1, vm2, vm3, vm4] doms = self.host.list_instance_domains(only_running=False) self.assertEqual(len(doms), 4) self.assertEqual(doms[0].name(), vm1.name()) self.assertEqual(doms[1].name(), vm2.name()) self.assertEqual(doms[2].name(), vm3.name()) self.assertEqual(doms[3].name(), vm4.name()) mock_list.assert_called_with(False) mock_list.return_value = [vm0, vm1, vm2] doms = self.host.list_instance_domains(only_guests=False) self.assertEqual(len(doms), 3) self.assertEqual(doms[0].name(), vm0.name()) self.assertEqual(doms[1].name(), vm1.name()) self.assertEqual(doms[2].name(), vm2.name()) mock_list.assert_called_with(True) @mock.patch.object(host.Host, "list_instance_domains") def test_list_guests(self, mock_list_domains): dom0 = mock.Mock(spec=fakelibvirt.virDomain) dom1 = mock.Mock(spec=fakelibvirt.virDomain) mock_list_domains.return_value = [ dom0, dom1] result = self.host.list_guests(True, False) mock_list_domains.assert_called_once_with( only_running=True, only_guests=False) self.assertEqual(dom0, result[0]._domain) self.assertEqual(dom1, result[1]._domain) def test_cpu_features_bug_1217630(self): self.host.get_connection() # Test old version of libvirt, it shouldn't see the `aes' feature with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt: del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES caps = self.host.get_capabilities() self.assertNotIn('aes', [x.name for x in caps.host.cpu.features]) # Cleanup the capabilities cache firstly self.host._caps = None # Test new version of libvirt, should find the `aes' feature with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt: mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1 caps = self.host.get_capabilities() self.assertIn('aes', [x.name for x in caps.host.cpu.features]) def test_cpu_features_are_not_duplicated(self): self.host.get_connection() # Test old version of libvirt. 
Should return single 'hypervisor' with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt: del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES caps = self.host.get_capabilities() cnt = [x.name for x in caps.host.cpu.features].count('xtpr') self.assertEqual(1, cnt) # Cleanup the capabilities cache firstly self.host._caps = None # Test new version of libvirt. Should still return single 'hypervisor' with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt: mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1 caps = self.host.get_capabilities() cnt = [x.name for x in caps.host.cpu.features].count('xtpr') self.assertEqual(1, cnt) def test_baseline_cpu_not_supported(self): # Handle just the NO_SUPPORT error not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virConnectBaselineCPU', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU', side_effect=not_supported_exc): caps = self.host.get_capabilities() self.assertEqual(vconfig.LibvirtConfigCaps, type(caps)) self.assertNotIn('aes', [x.name for x in caps.host.cpu.features]) # Clear cached result so we can test again... self.host._caps = None # Other errors should not be caught other_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'other exc', error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU', side_effect=other_exc): self.assertRaises(fakelibvirt.libvirtError, self.host.get_capabilities) def test_lxc_get_host_capabilities_failed(self): with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU', return_value=-1): caps = self.host.get_capabilities() self.assertEqual(vconfig.LibvirtConfigCaps, type(caps)) self.assertNotIn('aes', [x.name for x in caps.host.cpu.features]) def test_get_capabilities_no_host_cpu_model(self): """Tests that cpu features are not retrieved when the host cpu model is not in the capabilities. 
""" fake_caps_xml = ''' cef19ce0-0ca2-11df-855d-b19fbce37686 x86_64 Intel ''' with mock.patch.object(fakelibvirt.virConnect, 'getCapabilities', return_value=fake_caps_xml): caps = self.host.get_capabilities() self.assertEqual(vconfig.LibvirtConfigCaps, type(caps)) self.assertIsNone(caps.host.cpu.model) self.assertEqual(0, len(caps.host.cpu.features)) @mock.patch.object(fakelibvirt.virConnect, "getHostname") def test_get_hostname_caching(self, mock_hostname): mock_hostname.return_value = "foo" self.assertEqual('foo', self.host.get_hostname()) mock_hostname.assert_called_with() mock_hostname.reset_mock() mock_hostname.return_value = "bar" self.assertEqual('foo', self.host.get_hostname()) mock_hostname.assert_called_with() @mock.patch.object(fakelibvirt.virConnect, "getType") def test_get_driver_type(self, mock_type): mock_type.return_value = "qemu" self.assertEqual("qemu", self.host.get_driver_type()) mock_type.assert_called_once_with() @mock.patch.object(fakelibvirt.virConnect, "getVersion") def test_get_version(self, mock_version): mock_version.return_value = 1005001 self.assertEqual(1005001, self.host.get_version()) mock_version.assert_called_once_with() @mock.patch.object(fakelibvirt.virConnect, "secretLookupByUsage") def test_find_secret(self, mock_sec): """finding secrets with various usage_type.""" expected = [ mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'rbdvol'), mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'cephvol'), mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_ISCSI, 'iscsivol'), mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_VOLUME, 'vol')] self.host.find_secret('rbd', 'rbdvol') self.host.find_secret('ceph', 'cephvol') self.host.find_secret('iscsi', 'iscsivol') self.host.find_secret('volume', 'vol') self.assertEqual(expected, mock_sec.mock_calls) self.assertRaises(exception.NovaException, self.host.find_secret, "foo", "foovol") mock_sec.side_effect = fakelibvirt.libvirtError("") mock_sec.side_effect.err = (66, ) self.assertIsNone(self.host.find_secret('rbd', 'rbdvol')) @mock.patch.object(fakelibvirt.virConnect, "secretDefineXML") def test_create_secret(self, mock_sec): """creating secrets with various usage_type.""" self.host.create_secret('rbd', 'rbdvol') self.host.create_secret('ceph', 'cephvol') self.host.create_secret('iscsi', 'iscsivol') self.host.create_secret('volume', 'vol') self.assertRaises(exception.NovaException, self.host.create_secret, "foo", "foovol") secret = mock.MagicMock() mock_sec.return_value = secret self.host.create_secret('iscsi', 'iscsivol', password="foo") secret.setValue.assert_called_once_with("foo") @mock.patch('nova.virt.libvirt.host.Host.find_secret') def test_delete_secret(self, mock_find_secret): """deleting secret.""" secret = mock.MagicMock() mock_find_secret.return_value = secret expected = [mock.call('rbd', 'rbdvol'), mock.call().undefine()] self.host.delete_secret('rbd', 'rbdvol') self.assertEqual(expected, mock_find_secret.mock_calls) mock_find_secret.return_value = None self.host.delete_secret("rbd", "rbdvol") def test_get_cpu_count(self): with mock.patch.object(host.Host, "get_connection") as mock_conn: mock_conn().getInfo.return_value = ['zero', 'one', 'two'] self.assertEqual('two', self.host.get_cpu_count()) def test_get_memory_total(self): with mock.patch.object(host.Host, "get_connection") as mock_conn: mock_conn().getInfo.return_value = ['zero', 'one', 'two'] self.assertEqual('one', self.host.get_memory_mb_total()) def test_get_memory_used(self): m = mock.mock_open(read_data=""" MemTotal: 16194180 kB MemFree: 233092 kB 
MemAvailable: 8892356 kB Buffers: 567708 kB Cached: 8362404 kB SwapCached: 0 kB Active: 8381604 kB """) with test.nested( mock.patch.object(six.moves.builtins, "open", m, create=True), mock.patch.object(host.Host, "get_connection"), mock.patch('sys.platform', 'linux2'), ) as (mock_file, mock_conn, mock_platform): mock_conn().getInfo.return_value = [ arch.X86_64, 15814, 8, 1208, 1, 1, 4, 2] self.assertEqual(6866, self.host.get_memory_mb_used()) def test_get_memory_used_xen(self): self.flags(virt_type='xen', group='libvirt') class DiagFakeDomain(object): def __init__(self, id, memmb): self.id = id self.memmb = memmb def info(self): return [0, 0, self.memmb * 1024] def ID(self): return self.id def name(self): return "instance000001" def UUIDString(self): return str(uuid.uuid4()) m = mock.mock_open(read_data=""" MemTotal: 16194180 kB MemFree: 233092 kB MemAvailable: 8892356 kB Buffers: 567708 kB Cached: 8362404 kB SwapCached: 0 kB Active: 8381604 kB """) with test.nested( mock.patch.object(six.moves.builtins, "open", m, create=True), mock.patch.object(host.Host, "list_guests"), mock.patch.object(libvirt_driver.LibvirtDriver, "_conn"), mock.patch('sys.platform', 'linux2'), ) as (mock_file, mock_list, mock_conn, mock_platform): mock_list.return_value = [ libvirt_guest.Guest(DiagFakeDomain(0, 15814)), libvirt_guest.Guest(DiagFakeDomain(1, 750)), libvirt_guest.Guest(DiagFakeDomain(2, 1042))] mock_conn.getInfo.return_value = [ arch.X86_64, 15814, 8, 1208, 1, 1, 4, 2] self.assertEqual(8657, self.host.get_memory_mb_used()) mock_list.assert_called_with(only_guests=False) def test_get_cpu_stats(self): stats = self.host.get_cpu_stats() self.assertEqual( {'kernel': 5664160000000, 'idle': 1592705190000000, 'frequency': 800, 'user': 26728850000000, 'iowait': 6121490000000}, stats) @mock.patch.object(fakelibvirt.virConnect, "defineXML") def test_write_instance_config(self, mock_defineXML): xml = "foo" self.host.write_instance_config(xml) mock_defineXML.assert_called_once_with(xml) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") def test_device_lookup_by_name(self, mock_nodeDeviceLookupByName): self.host.device_lookup_by_name("foo") mock_nodeDeviceLookupByName.assert_called_once_with("foo") @mock.patch.object(fakelibvirt.virConnect, "listDevices") def test_list_pci_devices(self, mock_listDevices): self.host.list_pci_devices(8) mock_listDevices.assert_called_once_with('pci', 8) @mock.patch.object(fakelibvirt.virConnect, "compareCPU") def test_compare_cpu(self, mock_compareCPU): self.host.compare_cpu("cpuxml") mock_compareCPU.assert_called_once_with("cpuxml", 0) def test_is_cpu_control_policy_capable_ok(self): m = mock.mock_open( read_data="""cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0 cg /cgroup/memory cg opt1,opt2 0 0 """) with mock.patch( "six.moves.builtins.open", m, create=True): self.assertTrue(self.host.is_cpu_control_policy_capable()) def test_is_cpu_control_policy_capable_ko(self): m = mock.mock_open( read_data="""cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0 cg /cgroup/memory cg opt1,opt2 0 0 """) with mock.patch( "six.moves.builtins.open", m, create=True): self.assertFalse(self.host.is_cpu_control_policy_capable()) @mock.patch('six.moves.builtins.open', side_effect=IOError) def test_is_cpu_control_policy_capable_ioerror(self, mock_open): self.assertFalse(self.host.is_cpu_control_policy_capable()) class DomainJobInfoTestCase(test.NoDBTestCase): def setUp(self): super(DomainJobInfoTestCase, self).setUp() self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.conn = 
fakelibvirt.openAuth("qemu:///system", [[], lambda: True]) xml = ("" " instance-0000000a" "") self.dom = self.conn.createXML(xml, 0) host.DomainJobInfo._have_job_stats = True @mock.patch.object(fakelibvirt.virDomain, "jobInfo") @mock.patch.object(fakelibvirt.virDomain, "jobStats") def test_job_stats(self, mock_stats, mock_info): mock_stats.return_value = { "type": fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, "memory_total": 75, "memory_processed": 50, "memory_remaining": 33, "some_new_libvirt_stat_we_dont_know_about": 83 } info = host.DomainJobInfo.for_domain(self.dom) self.assertIsInstance(info, host.DomainJobInfo) self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type) self.assertEqual(75, info.memory_total) self.assertEqual(50, info.memory_processed) self.assertEqual(33, info.memory_remaining) self.assertEqual(0, info.disk_total) self.assertEqual(0, info.disk_processed) self.assertEqual(0, info.disk_remaining) mock_stats.assert_called_once_with() self.assertFalse(mock_info.called) @mock.patch.object(fakelibvirt.virDomain, "jobInfo") @mock.patch.object(fakelibvirt.virDomain, "jobStats") def test_job_info_no_support(self, mock_stats, mock_info): mock_stats.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "virDomainGetJobStats not implemented", fakelibvirt.VIR_ERR_NO_SUPPORT) mock_info.return_value = [ fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, 100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3] info = host.DomainJobInfo.for_domain(self.dom) self.assertIsInstance(info, host.DomainJobInfo) self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type) self.assertEqual(100, info.time_elapsed) self.assertEqual(99, info.time_remaining) self.assertEqual(10, info.data_total) self.assertEqual(11, info.data_processed) self.assertEqual(12, info.data_remaining) self.assertEqual(75, info.memory_total) self.assertEqual(50, info.memory_processed) self.assertEqual(33, info.memory_remaining) self.assertEqual(1, info.disk_total) self.assertEqual(2, info.disk_processed) self.assertEqual(3, info.disk_remaining) mock_stats.assert_called_once_with() mock_info.assert_called_once_with() @mock.patch.object(fakelibvirt.virDomain, "jobInfo") @mock.patch.object(fakelibvirt.virDomain, "jobStats") def test_job_info_attr_error(self, mock_stats, mock_info): mock_stats.side_effect = AttributeError("No such API") mock_info.return_value = [ fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, 100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3] info = host.DomainJobInfo.for_domain(self.dom) self.assertIsInstance(info, host.DomainJobInfo) self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type) self.assertEqual(100, info.time_elapsed) self.assertEqual(99, info.time_remaining) self.assertEqual(10, info.data_total) self.assertEqual(11, info.data_processed) self.assertEqual(12, info.data_remaining) self.assertEqual(75, info.memory_total) self.assertEqual(50, info.memory_processed) self.assertEqual(33, info.memory_remaining) self.assertEqual(1, info.disk_total) self.assertEqual(2, info.disk_processed) self.assertEqual(3, info.disk_remaining) mock_stats.assert_called_once_with() mock_info.assert_called_once_with() @mock.patch.object(fakelibvirt.virDomain, "jobInfo") @mock.patch.object(fakelibvirt.virDomain, "jobStats") def test_job_stats_no_domain(self, mock_stats, mock_info): mock_stats.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "No such domain with UUID blah", fakelibvirt.VIR_ERR_NO_DOMAIN) info = host.DomainJobInfo.for_domain(self.dom) self.assertIsInstance(info, host.DomainJobInfo) 
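# A minimal standalone sketch (a simplification of DomainJobInfo.for_domain,
# ignoring the no-domain and operation-invalid cases handled by these tests)
# of the fallback being exercised: prefer the newer virDomainGetJobStats call
# and drop back to the older virDomainGetJobInfo list when the stats API is
# unsupported or missing from the python binding.
import libvirt  # the real binding; these tests substitute fakelibvirt


def job_stats(dom):
    try:
        return dom.jobStats()
    except libvirt.libvirtError as ex:
        if ex.get_error_code() != libvirt.VIR_ERR_NO_SUPPORT:
            raise
    except AttributeError:
        # python binding predates jobStats() entirely
        pass
    # older API: a flat list of type, time, data, memory and disk counters
    return dom.jobInfo()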
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type) self.assertEqual(0, info.time_elapsed) self.assertEqual(0, info.time_remaining) self.assertEqual(0, info.memory_total) self.assertEqual(0, info.memory_processed) self.assertEqual(0, info.memory_remaining) mock_stats.assert_called_once_with() self.assertFalse(mock_info.called) @mock.patch.object(fakelibvirt.virDomain, "jobInfo") @mock.patch.object(fakelibvirt.virDomain, "jobStats") def test_job_info_no_domain(self, mock_stats, mock_info): mock_stats.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "virDomainGetJobStats not implemented", fakelibvirt.VIR_ERR_NO_SUPPORT) mock_info.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "No such domain with UUID blah", fakelibvirt.VIR_ERR_NO_DOMAIN) info = host.DomainJobInfo.for_domain(self.dom) self.assertIsInstance(info, host.DomainJobInfo) self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type) self.assertEqual(0, info.time_elapsed) self.assertEqual(0, info.time_remaining) self.assertEqual(0, info.memory_total) self.assertEqual(0, info.memory_processed) self.assertEqual(0, info.memory_remaining) mock_stats.assert_called_once_with() mock_info.assert_called_once_with() @mock.patch.object(fakelibvirt.virDomain, "jobInfo") @mock.patch.object(fakelibvirt.virDomain, "jobStats") def test_job_stats_operation_invalid(self, mock_stats, mock_info): mock_stats.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "Domain is not running", fakelibvirt.VIR_ERR_OPERATION_INVALID) info = host.DomainJobInfo.for_domain(self.dom) self.assertIsInstance(info, host.DomainJobInfo) self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type) self.assertEqual(0, info.time_elapsed) self.assertEqual(0, info.time_remaining) self.assertEqual(0, info.memory_total) self.assertEqual(0, info.memory_processed) self.assertEqual(0, info.memory_remaining) mock_stats.assert_called_once_with() self.assertFalse(mock_info.called) @mock.patch.object(fakelibvirt.virDomain, "jobInfo") @mock.patch.object(fakelibvirt.virDomain, "jobStats") def test_job_info_operation_invalid(self, mock_stats, mock_info): mock_stats.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "virDomainGetJobStats not implemented", fakelibvirt.VIR_ERR_NO_SUPPORT) mock_info.side_effect = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "Domain is not running", fakelibvirt.VIR_ERR_OPERATION_INVALID) info = host.DomainJobInfo.for_domain(self.dom) self.assertIsInstance(info, host.DomainJobInfo) self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type) self.assertEqual(0, info.time_elapsed) self.assertEqual(0, info.time_remaining) self.assertEqual(0, info.memory_total) self.assertEqual(0, info.memory_processed) self.assertEqual(0, info.memory_remaining) mock_stats.assert_called_once_with() mock_info.assert_called_once_with() nova-13.1.4/nova/tests/unit/virt/libvirt/test_config.py0000664000567000056710000027211413064447152024322 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from oslo_utils import units from nova.compute import arch from nova import test from nova.tests.unit import matchers from nova.virt.libvirt import config class LibvirtConfigBaseTest(test.NoDBTestCase): def assertXmlEqual(self, expectedXmlstr, actualXmlstr): self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr)) class LibvirtConfigTest(LibvirtConfigBaseTest): def test_config_plain(self): obj = config.LibvirtConfigObject(root_name="demo") xml = obj.to_xml() self.assertXmlEqual(xml, "") def test_config_ns(self): obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo", ns_uri="http://example.com/foo") xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_text(self): obj = config.LibvirtConfigObject(root_name="demo") root = obj.format_dom() root.append(obj._text_node("foo", "bar")) xml = etree.tostring(root) self.assertXmlEqual(xml, "bar") def test_config_text_unicode(self): obj = config.LibvirtConfigObject(root_name='demo') root = obj.format_dom() root.append(obj._text_node('foo', u'\xF0\x9F\x92\xA9')) self.assertXmlEqual('💩', etree.tostring(root)) def test_config_parse(self): inxml = "" obj = config.LibvirtConfigObject(root_name="demo") obj.parse_str(inxml) class LibvirtConfigCapsTest(LibvirtConfigBaseTest): def test_config_host(self): xmlin = """ c7a5fdbd-edaf-9455-926a-d65c16db1809 x86_64 Opteron_G3 AMD 4048280 1011941 0 4127684 1031921 0 hvm hvm """ obj = config.LibvirtConfigCaps() obj.parse_str(xmlin) self.assertIsInstance(obj.host, config.LibvirtConfigCapsHost) self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809") xmlout = obj.to_xml() self.assertXmlEqual(xmlin, xmlout) def test_config_host_numa_cell_no_memory_caps(self): xmlin = """ """ obj = config.LibvirtConfigCapsNUMACell() obj.parse_str(xmlin) self.assertEqual(0, obj.memory) self.assertEqual(1, len(obj.cpus)) def test_config_host_numa_cell_no_cpus_caps(self): xmlin = """ 128 """ obj = config.LibvirtConfigCapsNUMACell() obj.parse_str(xmlin) self.assertEqual(128, obj.memory) self.assertEqual(0, len(obj.cpus)) class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest): def test_config_platform(self): obj = config.LibvirtConfigGuestTimer() obj.track = "host" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_pit(self): obj = config.LibvirtConfigGuestTimer() obj.name = "pit" obj.tickpolicy = "discard" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_hpet(self): obj = config.LibvirtConfigGuestTimer() obj.name = "hpet" obj.present = False xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest): def test_config_utc(self): obj = config.LibvirtConfigGuestClock() xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_localtime(self): obj = config.LibvirtConfigGuestClock() obj.offset = "localtime" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_timezone(self): obj = config.LibvirtConfigGuestClock() obj.offset = "timezone" obj.timezone = "EDT" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_variable(self): obj = config.LibvirtConfigGuestClock() obj.offset = "variable" obj.adjustment = "123456" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_timers(self): obj = config.LibvirtConfigGuestClock() tmpit = config.LibvirtConfigGuestTimer() tmpit.name = "pit" tmpit.tickpolicy = "discard" tmrtc = 
config.LibvirtConfigGuestTimer() tmrtc.name = "rtc" tmrtc.tickpolicy = "merge" obj.add_timer(tmpit) obj.add_timer(tmrtc) xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigCPUFeature("mtrr") xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestCPUFeature("mtrr") obj.policy = "force" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest): def test_parse_dom(self): xml = """ """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestCPUNUMA() obj.parse_dom(xmldoc) self.assertEqual(2, len(obj.cells)) def test_config_simple(self): obj = config.LibvirtConfigGuestCPUNUMA() cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 0 cell.cpus = set([0, 1]) cell.memory = 1000000 cell.memAccess = "shared" obj.cells.append(cell) cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 1 cell.cpus = set([2, 3]) cell.memory = 1500000 cell.memAccess = "private" obj.cells.append(cell) xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigCPUTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" xml = obj.to_xml() self.assertXmlEqual(xml, """ Penryn """) def test_config_complex(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" obj.vendor = "Intel" obj.arch = arch.X86_64 obj.add_feature(config.LibvirtConfigCPUFeature("mtrr")) obj.add_feature(config.LibvirtConfigCPUFeature("apic")) xml = obj.to_xml() self.assertXmlEqual(xml, """ x86_64 Penryn Intel """) def test_only_uniq_cpu_featues(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" obj.vendor = "Intel" obj.arch = arch.X86_64 obj.add_feature(config.LibvirtConfigCPUFeature("mtrr")) obj.add_feature(config.LibvirtConfigCPUFeature("apic")) obj.add_feature(config.LibvirtConfigCPUFeature("apic")) obj.add_feature(config.LibvirtConfigCPUFeature("mtrr")) xml = obj.to_xml() self.assertXmlEqual(xml, """ x86_64 Penryn Intel """) def test_config_topology(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" obj.sockets = 4 obj.cores = 4 obj.threads = 2 xml = obj.to_xml() self.assertXmlEqual(xml, """ Penryn """) class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestCPU() obj.model = "Penryn" xml = obj.to_xml() self.assertXmlEqual(xml, """ Penryn """) def test_config_complex(self): obj = config.LibvirtConfigGuestCPU() obj.model = "Penryn" obj.vendor = "Intel" obj.arch = arch.X86_64 obj.mode = "custom" obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr")) obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic")) xml = obj.to_xml() self.assertXmlEqual(xml, """ x86_64 Penryn Intel """) def test_config_host(self): obj = config.LibvirtConfigGuestCPU() obj.mode = "host-model" obj.match = "exact" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_host_with_numa(self): obj = config.LibvirtConfigGuestCPU() obj.mode = "host-model" obj.match = "exact" numa = config.LibvirtConfigGuestCPUNUMA() cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 0 cell.cpus = set([0, 1]) cell.memory = 1000000 cell.memAccess = "private" numa.cells.append(cell) cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 1 cell.cpus = set([2, 3]) cell.memory = 1500000 numa.cells.append(cell) obj.numa = numa xml = 
obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestSMBIOS() xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestSysinfo() xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_bios(self): obj = config.LibvirtConfigGuestSysinfo() obj.bios_vendor = "Acme" obj.bios_version = "6.6.6" xml = obj.to_xml() self.assertXmlEqual(xml, """ Acme 6.6.6 """) def test_config_system(self): obj = config.LibvirtConfigGuestSysinfo() obj.system_manufacturer = "Acme" obj.system_product = "Wile Coyote" obj.system_version = "6.6.6" obj.system_serial = "123456" obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809" obj.system_family = "Anvils" xml = obj.to_xml() self.assertXmlEqual(xml, """ Acme Wile Coyote 6.6.6 123456 c7a5fdbd-edaf-9455-926a-d65c16db1809 Anvils """) def test_config_mixed(self): obj = config.LibvirtConfigGuestSysinfo() obj.bios_vendor = "Acme" obj.system_manufacturer = "Acme" obj.system_product = "Wile Coyote" obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809" obj.system_family = "Anvils" xml = obj.to_xml() self.assertXmlEqual(xml, """ Acme Acme Wile Coyote c7a5fdbd-edaf-9455-926a-d65c16db1809 Anvils """) class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest): def test_config_file(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_file_parse(self): xml = """ """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.source_path, '/tmp/hello') self.assertEqual(obj.target_dev, '/dev/hda') self.assertEqual(obj.target_bus, 'ide') self.assertFalse(obj.readonly) self.assertFalse(obj.shareable) def test_config_file_readonly(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.readonly = True xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_file_parse_readonly(self): xml = """ """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.source_path, '/tmp/hello') self.assertEqual(obj.target_dev, '/dev/hda') self.assertEqual(obj.target_bus, 'ide') self.assertTrue(obj.readonly) self.assertFalse(obj.shareable) def test_config_file_shareable(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.shareable = True xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_file_parse_shareable(self): xml = """ """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.source_path, '/tmp/hello') self.assertEqual(obj.target_dev, '/dev/hda') self.assertEqual(obj.target_bus, 'ide') self.assertFalse(obj.readonly) self.assertTrue(obj.shareable) def test_config_file_serial(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9" xml 
= obj.to_xml() self.assertXmlEqual(xml, """ 7a97c4a3-6f59-41d4-bf47-191d7f97f8e9 """) def test_config_file_serial_parse(self): xml = """ 7a97c4a3-6f59-41d4-bf47-191d7f97f8e9 """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.serial, '7a97c4a3-6f59-41d4-bf47-191d7f97f8e9') def test_config_file_discard(self): obj = config.LibvirtConfigGuestDisk() obj.driver_name = "qemu" obj.driver_format = "qcow2" obj.driver_cache = "none" obj.driver_discard = "unmap" obj.source_type = "file" obj.source_path = "/tmp/hello.qcow2" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9" xml = obj.to_xml() self.assertXmlEqual(""" 7a97c4a3-6f59-41d4-bf47-191d7f97f8e9 """, xml) def test_config_file_discard_parse(self): xml = """ 7a97c4a3-6f59-41d4-bf47-191d7f97f8e9 """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual('unmap', obj.driver_discard) def test_config_file_io(self): obj = config.LibvirtConfigGuestDisk() obj.driver_name = "qemu" obj.driver_format = "qcow2" obj.driver_cache = "none" obj.driver_io = "native" obj.source_type = "file" obj.source_path = "/tmp/hello.qcow2" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9" xml = obj.to_xml() self.assertXmlEqual(""" 7a97c4a3-6f59-41d4-bf47-191d7f97f8e9 """, xml) def test_config_file_io_parse(self): xml = """ 7a97c4a3-6f59-41d4-bf47-191d7f97f8e9 """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual('native', obj.driver_io) def test_config_block(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "block" obj.source_path = "/tmp/hello" obj.source_device = "cdrom" obj.driver_name = "qemu" obj.target_dev = "/dev/hdc" obj.target_bus = "ide" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_block_parse(self): xml = """ """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'block') self.assertEqual(obj.source_path, '/tmp/hello') self.assertEqual(obj.target_dev, '/dev/hdc') self.assertEqual(obj.target_bus, 'ide') def test_config_network(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "network" obj.source_protocol = "iscsi" obj.source_name = "foo.bar.com" obj.driver_name = "qemu" obj.driver_format = "qcow2" obj.target_dev = "/dev/hda" obj.target_bus = "ide" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_network_parse(self): xml = """ """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'network') self.assertEqual(obj.source_protocol, 'iscsi') self.assertEqual(obj.source_name, 'foo.bar.com') self.assertEqual(obj.driver_name, 'qemu') self.assertEqual(obj.driver_format, 'qcow2') self.assertEqual(obj.target_dev, '/dev/hda') self.assertEqual(obj.target_bus, 'ide') def test_config_network_no_name(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = 'network' obj.source_protocol = 'nbd' obj.source_hosts = ['foo.bar.com'] obj.source_ports = [None] obj.driver_name = 'qemu' obj.driver_format = 'raw' obj.target_dev = '/dev/vda' obj.target_bus = 'virtio' xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_network_multihost(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = 'network' obj.source_protocol = 'rbd' 
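# A minimal standalone usage sketch of the config-object round trip these
# LibvirtConfigGuestDisk tests rely on: build an object, serialize it with
# to_xml(), then parse the result back into a fresh object with parse_str().
# The attribute values below are illustrative only.
from nova.virt.libvirt import config

disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/hello"
disk.target_dev = "/dev/hda"
disk.target_bus = "ide"
xml = disk.to_xml()

parsed = config.LibvirtConfigGuestDisk()
parsed.parse_str(xml)
assert parsed.source_path == "/tmp/hello"
assert parsed.target_bus == "ide"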
        obj.source_name = 'pool/image'
        obj.source_hosts = ['foo.bar.com', '::1', '1.2.3.4']
        obj.source_ports = [None, '123', '456']
        obj.driver_name = 'qemu'
        obj.driver_format = 'raw'
        obj.target_dev = '/dev/vda'
        obj.target_bus = 'virtio'
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)

    def test_config_network_auth(self):
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "network"
        obj.source_protocol = "rbd"
        obj.source_name = "pool/image"
        obj.driver_name = "qemu"
        obj.driver_format = "raw"
        obj.target_dev = "/dev/vda"
        obj.target_bus = "virtio"
        obj.auth_username = "foo"
        obj.auth_secret_type = "ceph"
        obj.auth_secret_uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)

    def test_config_iotune(self):
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"
        obj.disk_read_bytes_sec = 1024000
        obj.disk_read_iops_sec = 1000
        obj.disk_total_bytes_sec = 2048000
        obj.disk_write_bytes_sec = 1024000
        obj.disk_write_iops_sec = 1000
        obj.disk_total_iops_sec = 2000
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            1024000
            1000
            1024000
            1000
            2048000
            2000
            """)

    def test_config_blockio(self):
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"
        obj.logical_block_size = "4096"
        obj.physical_block_size = "4096"
        xml = obj.to_xml()
        self.assertXmlEqual(""" """, xml)


class LibvirtConfigGuestSnapshotDiskTest(LibvirtConfigBaseTest):

    def test_config_file(self):
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)

    def test_config_file_parse(self):
        xml = """ """
        xmldoc = etree.fromstring(xml)
        obj = config.LibvirtConfigGuestDisk()
        obj.parse_dom(xmldoc)
        self.assertEqual(obj.source_type, 'file')
        self.assertEqual(obj.source_path, '/tmp/hello')
        self.assertEqual(obj.target_dev, '/dev/hda')
        self.assertEqual(obj.target_bus, 'ide')


class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):

    def test_config_file_parse(self):
        xml = """ """
        xmldoc = etree.fromstring(xml)
        obj = config.LibvirtConfigGuestDiskBackingStore()
        obj.parse_dom(xmldoc)
        self.assertEqual(obj.driver_name, 'qemu')
        self.assertEqual(obj.driver_format, 'qcow2')
        self.assertEqual(obj.source_type, 'file')
        self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2')
        self.assertEqual(obj.backing_store.driver_name, 'qemu')
        self.assertEqual(obj.backing_store.source_type, 'file')
        self.assertEqual(obj.backing_store.source_file,
                         '/var/lib/libvirt/images/base.qcow2')
        self.assertIsNone(obj.backing_store.backing_store)

    def test_config_network_parse(self):
        xml = """ """
        xmldoc = etree.fromstring(xml)
        obj = config.LibvirtConfigGuestDiskBackingStore()
        obj.parse_dom(xmldoc)
        self.assertEqual(obj.source_type, 'network')
        self.assertEqual(obj.source_protocol, 'gluster')
        self.assertEqual(obj.source_name, 'volume1/img1')
        self.assertEqual(obj.source_hosts[0], 'host1')
        self.assertEqual(obj.source_ports[0], '24007')
        self.assertEqual(obj.index, '1')
        self.assertEqual(obj.backing_store.source_name, 'volume1/img2')
        self.assertEqual(obj.backing_store.index, '2')
        self.assertEqual(obj.backing_store.source_hosts[0], 'host1')
        self.assertEqual(obj.backing_store.source_ports[0], '24007')
        self.assertIsNone(obj.backing_store.backing_store)


class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):

    def test_config_mount(self):
        obj = config.LibvirtConfigGuestFilesys()
        obj.source_type = "mount"
        obj.source_dir = "/tmp/hello"
        obj.target_dir = "/mnt"
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)

    def test_config_block(self):
        obj = config.LibvirtConfigGuestFilesys()
        obj.source_type = "block"
        obj.source_dev = "/dev/sdb"
        obj.target_dir = "/mnt"
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)

    def test_config_file(self):
        obj = config.LibvirtConfigGuestFilesys()
        obj.source_type = "file"
        obj.source_file = "/data/myimage.qcow2"
        obj.driver_type = "nbd"
        obj.driver_format = "qcow2"
        obj.target_dir = "/mnt"
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)


class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):

    def test_config_tablet(self):
        obj = config.LibvirtConfigGuestInput()
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)


class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):

    def test_config_graphics(self):
        obj = config.LibvirtConfigGuestGraphics()
        obj.type = "vnc"
        obj.autoport = True
        obj.keymap = "en_US"
        obj.listen = "127.0.0.1"
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """ """)


class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):

    def test_config_pci_guest_host_dev(self):
        obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
        xml = obj.to_xml()
        expected = """ """
        self.assertXmlEqual(xml, expected)

    def test_parse_GuestHostdev(self):
        xmldoc = """"""
        obj = config.LibvirtConfigGuestHostdev()
        obj.parse_str(xmldoc)
        self.assertEqual(obj.mode, 'subsystem')
        self.assertEqual(obj.type, 'pci')
        self.assertEqual(obj.managed, 'yes')

    def test_parse_GuestHostdev_non_pci(self):
        xmldoc = """"""
        obj = config.LibvirtConfigGuestHostdev()
        obj.parse_str(xmldoc)
        self.assertEqual(obj.mode, 'subsystem')
        self.assertEqual(obj.type, 'usb')
        self.assertEqual(obj.managed, 'no')


class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):

    expected = """
""" def test_config_guest_hosdev_pci(self): hostdev = config.LibvirtConfigGuestHostdevPCI() hostdev.domain = "1234" hostdev.bus = "11" hostdev.slot = "22" hostdev.function = "3" xml = hostdev.to_xml() self.assertXmlEqual(self.expected, xml) def test_parse_guest_hosdev_pci(self): xmldoc = self.expected obj = config.LibvirtConfigGuestHostdevPCI() obj.parse_str(xmldoc) self.assertEqual(obj.mode, 'subsystem') self.assertEqual(obj.type, 'pci') self.assertEqual(obj.managed, 'yes') self.assertEqual(obj.domain, '0x1234') self.assertEqual(obj.bus, '0x11') self.assertEqual(obj.slot, '0x22') self.assertEqual(obj.function, '0x3') def test_parse_guest_hosdev_usb(self): xmldoc = """ """ obj = config.LibvirtConfigGuestHostdevPCI() obj.parse_str(xmldoc) self.assertEqual(obj.mode, 'subsystem') self.assertEqual(obj.type, 'usb') class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest): def test_config_file(self): obj = config.LibvirtConfigGuestSerial() obj.type = "file" obj.source_path = "/tmp/vm.log" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_serial_port(self): obj = config.LibvirtConfigGuestSerial() obj.type = "tcp" obj.listen_port = 11111 obj.listen_host = "0.0.0.0" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest): def test_config_pty(self): obj = config.LibvirtConfigGuestConsole() obj.type = "pty" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_target_type(self): obj = config.LibvirtConfigGuestConsole() obj.type = "pty" obj.target_type = "sclp" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_type_file_with_target_type(self): obj = config.LibvirtConfigGuestConsole() obj.type = "file" obj.target_type = "sclplm" obj.source_path = "/var/lib/nova/instances/uuid/console.log" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_target_port(self): obj = config.LibvirtConfigGuestConsole() obj.target_port = 0 xml = obj.to_xml() self.assertXmlEqual(xml, """ """) class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest): def test_config_spice_minimal(self): obj = config.LibvirtConfigGuestChannel() obj.type = "spicevmc" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_spice_full(self): obj = config.LibvirtConfigGuestChannel() obj.type = "spicevmc" obj.target_name = "com.redhat.spice.0" xml = obj.to_xml() self.assertXmlEqual(xml, """ """) def test_config_qga_full(self): obj = config.LibvirtConfigGuestChannel() obj.type = "unix" obj.target_name = "org.qemu.guest_agent.0" obj.source_path = "/var/lib/libvirt/qemu/%s.%s.sock" % ( obj.target_name, "instance-name") xml = obj.to_xml() self.assertXmlEqual(xml, """ """ % obj.source_path) class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest): def test_config_ethernet(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "ethernet" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "vnet0" obj.driver_name = "vhost" obj.vif_inbound_average = 16384 obj.vif_inbound_peak = 32768 obj.vif_inbound_burst = 3276 obj.vif_outbound_average = 32768 obj.vif_outbound_peak = 65536 obj.vif_outbound_burst = 6553 xml = obj.to_xml() self.assertXmlEqual(xml, """ """) # parse the xml from the first object into a new object and make sure # they are the same obj2 = config.LibvirtConfigGuestInterface() obj2.parse_str(xml) self.assertXmlEqual(xml, obj2.to_xml()) def test_config_driver_options(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "ethernet" obj.mac_addr = 
"DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "vnet0" obj.driver_name = "vhost" obj.vhost_queues = 4 xml = obj.to_xml() self.assertXmlEqual(xml, """ """) # parse the xml from the first object into a new object and make sure # they are the same obj2 = config.LibvirtConfigGuestInterface() obj2.parse_str(xml) self.assertXmlEqual(xml, obj2.to_xml()) def test_config_bridge(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "bridge" obj.source_dev = "br0" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "tap12345678" obj.filtername = "clean-traffic" obj.filterparams.append({"key": "IP", "value": "192.168.122.1"}) obj.vif_inbound_average = 16384 obj.vif_inbound_peak = 32768 obj.vif_inbound_burst = 3276 obj.vif_outbound_average = 32768 obj.vif_outbound_peak = 65536 obj.vif_outbound_burst = 6553 xml = obj.to_xml() self.assertXmlEqual(xml, """ """) # parse the xml from the first object into a new object and make sure # they are the same obj2 = config.LibvirtConfigGuestInterface() obj2.parse_str(xml) self.assertXmlEqual(xml, obj2.to_xml()) def test_config_bridge_ovs(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "bridge" obj.source_dev = "br0" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "tap12345678" obj.vporttype = "openvswitch" obj.vportparams.append({"key": "instanceid", "value": "foobar"}) xml = obj.to_xml() self.assertXmlEqual(xml, """ """) # parse the xml from the first object into a new object and make sure # they are the same obj2 = config.LibvirtConfigGuestInterface() obj2.parse_str(xml) self.assertXmlEqual(xml, obj2.to_xml()) def test_config_bridge_xen(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "bridge" obj.source_dev = "br0" obj.mac_addr = "CA:FE:BE:EF:CA:FE" obj.script = "/path/to/test-vif-openstack" xml = obj.to_xml() self.assertXmlEqual(xml, """